mmu.c 166.6 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
A
Avi Kivity 已提交
2 3 4 5 6 7 8 9 10
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
N
Nicolas Kaiser 已提交
11
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
A
Avi Kivity 已提交
12 13 14 15 16
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */
A
Avi Kivity 已提交
17

18
#include "irq.h"
19
#include "ioapic.h"
20
#include "mmu.h"
21
#include "mmu_internal.h"
22
#include "tdp_mmu.h"
23
#include "x86.h"
A
Avi Kivity 已提交
24
#include "kvm_cache_regs.h"
25
#include "kvm_emulate.h"
26
#include "cpuid.h"
27
#include "spte.h"
A
Avi Kivity 已提交
28

29
#include <linux/kvm_host.h>
A
Avi Kivity 已提交
30 31 32 33
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
34 35
#include <linux/moduleparam.h>
#include <linux/export.h>
36
#include <linux/swap.h>
M
Marcelo Tosatti 已提交
37
#include <linux/hugetlb.h>
38
#include <linux/compiler.h>
39
#include <linux/srcu.h>
40
#include <linux/slab.h>
41
#include <linux/sched/signal.h>
42
#include <linux/uaccess.h>
43
#include <linux/hash.h>
44
#include <linux/kern_levels.h>
45
#include <linux/kthread.h>
A
Avi Kivity 已提交
46

A
Avi Kivity 已提交
47
#include <asm/page.h>
48
#include <asm/memtype.h>
A
Avi Kivity 已提交
49
#include <asm/cmpxchg.h>
50
#include <asm/io.h>
51
#include <asm/set_memory.h>
52
#include <asm/vmx.h>
53
#include <asm/kvm_page_track.h>
54
#include "trace.h"
A
Avi Kivity 已提交
55

56 57
#include "paging.h"

P
Paolo Bonzini 已提交
58 59
extern bool itlb_multihit_kvm_mitigation;

60
int __read_mostly nx_huge_pages = -1;
61 62 63 64
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
65
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
66
#endif
P
Paolo Bonzini 已提交
67 68

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
69
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
P
Paolo Bonzini 已提交
70

71
static const struct kernel_param_ops nx_huge_pages_ops = {
P
Paolo Bonzini 已提交
72 73 74 75
	.set = set_nx_huge_pages,
	.get = param_get_bool,
};

76
static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
77 78 79 80
	.set = set_nx_huge_pages_recovery_ratio,
	.get = param_get_uint,
};

P
Paolo Bonzini 已提交
81 82
module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
83 84 85
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
		&nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
P
Paolo Bonzini 已提交
86

87 88 89
static bool __read_mostly force_flush_and_sync_on_reuse;
module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);

90 91 92 93 94 95 96
/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
97
bool tdp_enabled = false;
98

99
static int max_huge_page_level __read_mostly;
100
static int tdp_root_level __read_mostly;
101
static int max_tdp_level __read_mostly;
102

103 104 105 106
enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
107 108 109
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
110
};
111 112

#ifdef MMU_DEBUG
113
bool dbg = 0;
114
module_param(dbg, bool, 0644);
115
#endif
A
Avi Kivity 已提交
116

117 118
#define PTE_PREFETCH_NUM		8

A
Avi Kivity 已提交
119 120 121
#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
M
Mike Day 已提交
122
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
A
Avi Kivity 已提交
123

124 125 126
#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))
A
Avi Kivity 已提交
127 128 129 130 131 132 133 134

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
135 136 137
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))
A
Avi Kivity 已提交
138

139 140
#include <trace/events/kvm.h>

141
/* make pte_list_desc fit well in cache lines */
142
#define PTE_LIST_EXT 14
143

144 145 146 147 148
/*
 * Slight optimization of cacheline layout, by putting `more' and `spte_count'
 * at the start; then accessing it will only use one single cacheline for
 * either full (entries==PTE_LIST_EXT) case or entries<=6.
 */
149 150
struct pte_list_desc {
	struct pte_list_desc *more;
151 152 153 154 155 156
	/*
	 * Stores number of entries stored in the pte_list_desc.  No need to be
	 * u64 but just for easier alignment.  When PTE_LIST_EXT, means full.
	 */
	u64 spte_count;
	u64 *sptes[PTE_LIST_EXT];
157 158
};

159 160 161 162
struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
163
	int level;
164 165 166
	unsigned index;
};

167 168 169 170 171 172 173
#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
					 (_root), (_addr));                \
	     shadow_walk_okay(&(_walker));			           \
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
174 175 176 177
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

178 179 180 181 182 183
#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))

184
static struct kmem_cache *pte_list_desc_cache;
185
struct kmem_cache *mmu_page_header_cache;
186
static struct percpu_counter kvm_total_used_mmu_pages;
187

188
static void mmu_spte_set(u64 *sptep, u64 spte);
189 190
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
191

192 193 194 195 196 197
struct kvm_mmu_role_regs {
	const unsigned long cr0;
	const unsigned long cr4;
	const u64 efer;
};

198 199 200
#define CREATE_TRACE_POINTS
#include "mmutrace.h"

201 202 203 204 205 206
/*
 * Yes, lot's of underscores.  They're a hint that you probably shouldn't be
 * reading from the role_regs.  Once the mmu_role is constructed, it becomes
 * the single source of truth for the MMU's state.
 */
#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
207
static inline bool __maybe_unused ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
208 209 210 211 212 213 214 215 216 217 218 219 220 221
{									\
	return !!(regs->reg & flag);					\
}
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);

222 223 224 225 226 227 228
/*
 * The MMU itself (with a valid role) is the single source of truth for the
 * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
 * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
 * and the vCPU may be incorrect/irrelevant.
 */
#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
229
static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
230 231 232 233 234 235 236 237 238 239 240 241 242
{								\
	return !!(mmu->mmu_role. base_or_ext . reg##_##name);	\
}
BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);

243 244 245 246 247 248 249 250 251 252
static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_role_regs regs = {
		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
		.efer = vcpu->arch.efer,
	};

	return regs;
}
253

254 255 256 257 258 259 260 261 262 263 264 265
static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
{
	if (!____is_cr0_pg(regs))
		return 0;
	else if (____is_efer_lma(regs))
		return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
					       PT64_ROOT_4LEVEL;
	else if (____is_cr4_pae(regs))
		return PT32E_ROOT_LEVEL;
	else
		return PT32_ROOT_LEVEL;
}
266 267 268

static inline bool kvm_available_flush_tlb_with_range(void)
{
269
	return kvm_x86_ops.tlb_remote_flush_with_range;
270 271 272 273 274 275 276
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

277
	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
278
		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
279 280 281 282 283

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

284
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
285 286 287 288 289 290 291 292 293 294
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;

	range.start_gfn = start_gfn;
	range.pages = pages;

	kvm_flush_remote_tlbs_with_range(kvm, &range);
}

295 296 297
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned int access)
{
298
	u64 spte = make_mmio_spte(vcpu, gfn, access);
299

300 301
	trace_mark_mmio_spte(sptep, gfn, spte);
	mmu_spte_set(sptep, spte);
302 303 304 305
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
306
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
307

308
	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
309 310 311
	       & shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
312 313 314 315
}

static unsigned get_mmio_spte_access(u64 spte)
{
316
	return spte & shadow_mmio_access_mask;
317 318
}

319
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
320
{
321
	u64 kvm_gen, spte_gen, gen;
322

323 324 325
	gen = kvm_vcpu_memslots(vcpu)->generation;
	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return false;
326

327
	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
328 329 330 331
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
332 333
}

334 335 336 337 338 339
static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                                  struct x86_exception *exception)
{
        return gpa;
}

A
Avi Kivity 已提交
340 341 342 343 344
static int is_cpuid_PSE36(void)
{
	return 1;
}

345 346 347 348 349 350 351
static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

352
#ifdef CONFIG_X86_64
A
Avi Kivity 已提交
353
static void __set_spte(u64 *sptep, u64 spte)
354
{
355
	WRITE_ONCE(*sptep, spte);
356 357
}

358
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
359
{
360
	WRITE_ONCE(*sptep, spte);
361 362 363 364 365 366
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}
367 368 369

static u64 __get_spte_lockless(u64 *sptep)
{
370
	return READ_ONCE(*sptep);
371
}
372
#else
373 374 375 376 377 378 379
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};
380

381 382
static void count_spte_clear(u64 *sptep, u64 spte)
{
383
	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
384 385 386 387 388 389 390 391 392

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

393 394 395
static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;
396

397 398 399 400 401 402 403 404 405 406 407 408
	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, We should store
	 * the high bits firstly, then set present bit, so cpu can not
	 * fetch this spte while we are setting the spte.
	 */
	smp_wmb();

409
	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
410 411
}

412 413 414 415 416 417 418
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

419
	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
420 421 422 423 424 425 426 427

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * present bit firstly to avoid vcpu fetch the old high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
428
	count_spte_clear(sptep, spte);
429 430 431 432 433 434 435 436 437 438 439
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
440 441
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
442
	count_spte_clear(sptep, spte);
443 444 445

	return orig.spte;
}
446 447 448

/*
 * The idea using the light way get the spte on x86_32 guest is from
449
 * gup_get_pte (mm/gup.c).
450 451 452 453 454 455 456 457 458 459 460 461 462 463
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running out of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
464 465 466
 */
static u64 __get_spte_lockless(u64 *sptep)
{
467
	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
487 488
#endif

489 490
static bool spte_has_volatile_bits(u64 spte)
{
491 492 493
	if (!is_shadow_present_pte(spte))
		return false;

494
	/*
495
	 * Always atomically update spte if it can be updated
496 497 498 499
	 * out of mmu-lock, it can ensure dirty bit is not lost,
	 * also, it can help us to get a stable is_writable_pte()
	 * to ensure tlb flush is not missed.
	 */
500 501
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
502 503
		return true;

504
	if (spte_ad_enabled(spte)) {
505 506 507 508
		if ((spte & shadow_accessed_mask) == 0 ||
	    	    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}
509

510
	return false;
511 512
}

513 514 515 516 517 518 519 520 521 522 523 524
/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

525 526 527
/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
528
 */
529
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
530
{
531
	u64 old_spte = *sptep;
532

533
	WARN_ON(!is_shadow_present_pte(new_spte));
534

535 536
	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
537
		return old_spte;
538
	}
539

540
	if (!spte_has_volatile_bits(old_spte))
541
		__update_clear_spte_fast(sptep, new_spte);
542
	else
543
		old_spte = __update_clear_spte_slow(sptep, new_spte);
544

545 546
	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568
	return old_spte;
}

/* Rules for using mmu_spte_update:
 * Update the state bits, it means the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might be cached on a CPU's TLB, the return value indicates this
 * case.
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	bool flush = false;
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	if (!is_shadow_present_pte(old_spte))
		return false;

569 570
	/*
	 * For the spte updated out of mmu-lock is safe, since
571
	 * we always atomically update it, see the comments in
572 573
	 * spte_has_volatile_bits().
	 */
574
	if (spte_can_locklessly_be_made_writable(old_spte) &&
575
	      !is_writable_pte(new_spte))
576
		flush = true;
577

578
	/*
579
	 * Flush TLB when accessed/dirty states are changed in the page tables,
580 581 582
	 * to guarantee consistency between TLB and page tables.
	 */

583 584
	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
		flush = true;
585
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
586 587 588 589
	}

	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
		flush = true;
590
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
591
	}
592

593
	return flush;
594 595
}

596 597 598 599
/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent, and track the
 * state bits, it is used to clear the last level sptep.
600
 * Returns the old PTE.
601
 */
602
static int mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
603
{
D
Dan Williams 已提交
604
	kvm_pfn_t pfn;
605
	u64 old_spte = *sptep;
606
	int level = sptep_to_sp(sptep)->role.level;
607 608

	if (!spte_has_volatile_bits(old_spte))
609
		__update_clear_spte_fast(sptep, 0ull);
610
	else
611
		old_spte = __update_clear_spte_slow(sptep, 0ull);
612

613
	if (!is_shadow_present_pte(old_spte))
614
		return old_spte;
615

616 617
	kvm_update_page_stats(kvm, level, -1);

618
	pfn = spte_to_pfn(old_spte);
619 620 621 622 623 624

	/*
	 * KVM does not hold the refcount of the page used by
	 * kvm mmu, before reclaiming the page, we should
	 * unmap it from mmu first.
	 */
625
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
626

627
	if (is_accessed_spte(old_spte))
628
		kvm_set_pfn_accessed(pfn);
629 630

	if (is_dirty_spte(old_spte))
631
		kvm_set_pfn_dirty(pfn);
632

633
	return old_spte;
634 635 636 637 638 639 640 641 642
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear spte without caring the state bits of sptep,
 * it is used to set the upper level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
643
	__update_clear_spte_fast(sptep, 0ull);
644 645
}

646 647 648 649 650
static u64 mmu_spte_get_lockless(u64 *sptep)
{
	return __get_spte_lockless(sptep);
}

651 652 653 654
/* Restore an acc-track PTE back to a regular PTE */
static u64 restore_acc_track_spte(u64 spte)
{
	u64 new_spte = spte;
655 656
	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;
657

658
	WARN_ON_ONCE(spte_ad_enabled(spte));
659 660 661
	WARN_ON_ONCE(!is_access_track_spte(spte));

	new_spte &= ~shadow_acc_track_mask;
662 663
	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
664 665 666 667 668
	new_spte |= saved_bits;

	return new_spte;
}

669 670 671 672 673 674 675 676
/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
	u64 spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(spte))
		return false;

677
	if (spte_ad_enabled(spte)) {
678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694
		clear_bit((ffs(shadow_accessed_mask) - 1),
			  (unsigned long *)sptep);
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(spte))
			kvm_set_pfn_dirty(spte_to_pfn(spte));

		spte = mark_spte_for_access_track(spte);
		mmu_spte_update_no_track(sptep, spte);
	}

	return true;
}

695 696
static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
697 698 699 700 701 702 703 704
	if (is_tdp_mmu(vcpu->arch.mmu)) {
		kvm_tdp_mmu_walk_lockless_begin();
	} else {
		/*
		 * Prevent page table teardown by making any free-er wait during
		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
		 */
		local_irq_disable();
705

706 707 708 709 710 711
		/*
		 * Make sure a following spte read is not reordered ahead of the write
		 * to vcpu->mode.
		 */
		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
	}
712 713 714 715
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
716 717 718 719 720 721 722 723 724 725 726
	if (is_tdp_mmu(vcpu->arch.mmu)) {
		kvm_tdp_mmu_walk_lockless_end();
	} else {
		/*
		 * Make sure the write to vcpu->mode is not reordered in front of
		 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
		 */
		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
		local_irq_enable();
	}
727 728
}

729
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
730
{
731 732
	int r;

733
	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
734 735
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
736
	if (r)
737
		return r;
738 739
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
				       PT64_ROOT_MAX_LEVEL);
740
	if (r)
741
		return r;
742
	if (maybe_indirect) {
743 744
		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
					       PT64_ROOT_MAX_LEVEL);
745 746 747
		if (r)
			return r;
	}
748 749
	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
					  PT64_ROOT_MAX_LEVEL);
750 751 752 753
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
754 755 756 757
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
758 759
}

760
static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
761
{
762
	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
763 764
}

765
static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
766
{
767
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
768 769
}

770 771 772 773 774 775 776 777 778 779
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
780
	if (!sp->role.direct) {
781
		sp->gfns[index] = gfn;
782 783 784 785 786 787 788 789
		return;
	}

	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
		pr_err_ratelimited("gfn mismatch under direct page %llx "
				   "(expected %llx, got %llx)\n",
				   sp->gfn,
				   kvm_mmu_page_get_gfn(sp, index), gfn);
790 791
}

M
Marcelo Tosatti 已提交
792
/*
793 794
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
M
Marcelo Tosatti 已提交
795
 */
796
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
797
		const struct kvm_memory_slot *slot, int level)
M
Marcelo Tosatti 已提交
798 799 800
{
	unsigned long idx;

801
	idx = gfn_to_index(gfn, slot->base_gfn, level);
802
	return &slot->arch.lpage_info[level - 2][idx];
M
Marcelo Tosatti 已提交
803 804
}

805
static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
806 807 808 809 810
					    gfn_t gfn, int count)
{
	struct kvm_lpage_info *linfo;
	int i;

811
	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
812 813 814 815 816 817
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->disallow_lpage += count;
		WARN_ON(linfo->disallow_lpage < 0);
	}
}

818
void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
819 820 821 822
{
	update_gfn_disallow_lpage_count(slot, gfn, 1);
}

823
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
824 825 826 827
{
	update_gfn_disallow_lpage_count(slot, gfn, -1);
}

828
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
M
Marcelo Tosatti 已提交
829
{
830
	struct kvm_memslots *slots;
831
	struct kvm_memory_slot *slot;
832
	gfn_t gfn;
M
Marcelo Tosatti 已提交
833

834
	kvm->arch.indirect_shadow_pages++;
835
	gfn = sp->gfn;
836 837
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
838 839

	/* the non-leaf shadow pages are keeping readonly. */
840
	if (sp->role.level > PG_LEVEL_4K)
841 842 843
		return kvm_slot_page_track_add_page(kvm, slot, gfn,
						    KVM_PAGE_TRACK_WRITE);

844
	kvm_mmu_gfn_disallow_lpage(slot, gfn);
M
Marcelo Tosatti 已提交
845 846
}

847
void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
P
Paolo Bonzini 已提交
848 849 850 851 852
{
	if (sp->lpage_disallowed)
		return;

	++kvm->stat.nx_lpage_splits;
853 854
	list_add_tail(&sp->lpage_disallowed_link,
		      &kvm->arch.lpage_disallowed_mmu_pages);
P
Paolo Bonzini 已提交
855 856 857
	sp->lpage_disallowed = true;
}

858
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
M
Marcelo Tosatti 已提交
859
{
860
	struct kvm_memslots *slots;
861
	struct kvm_memory_slot *slot;
862
	gfn_t gfn;
M
Marcelo Tosatti 已提交
863

864
	kvm->arch.indirect_shadow_pages--;
865
	gfn = sp->gfn;
866 867
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
868
	if (sp->role.level > PG_LEVEL_4K)
869 870 871
		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
						       KVM_PAGE_TRACK_WRITE);

872
	kvm_mmu_gfn_allow_lpage(slot, gfn);
M
Marcelo Tosatti 已提交
873 874
}

875
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
P
Paolo Bonzini 已提交
876 877 878
{
	--kvm->stat.nx_lpage_splits;
	sp->lpage_disallowed = false;
879
	list_del(&sp->lpage_disallowed_link);
P
Paolo Bonzini 已提交
880 881
}

882 883 884
static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
M
Marcelo Tosatti 已提交
885 886
{
	struct kvm_memory_slot *slot;
887

888
	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
889 890
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return NULL;
891
	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
892
		return NULL;
893 894 895 896

	return slot;
}

897
/*
898
 * About rmap_head encoding:
899
 *
900 901
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
902
 * pte_list_desc containing more mappings.
903 904 905 906
 */

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
907
 */
908
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
909
			struct kvm_rmap_head *rmap_head)
910
{
911
	struct pte_list_desc *desc;
912
	int count = 0;
913

914
	if (!rmap_head->val) {
915
		rmap_printk("%p %llx 0->1\n", spte, *spte);
916 917
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
918
		rmap_printk("%p %llx 1->many\n", spte, *spte);
919
		desc = mmu_alloc_pte_list_desc(vcpu);
920
		desc->sptes[0] = (u64 *)rmap_head->val;
A
Avi Kivity 已提交
921
		desc->sptes[1] = spte;
922
		desc->spte_count = 2;
923
		rmap_head->val = (unsigned long)desc | 1;
924
		++count;
925
	} else {
926
		rmap_printk("%p %llx many->many\n", spte, *spte);
927
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
928
		while (desc->spte_count == PTE_LIST_EXT) {
929
			count += PTE_LIST_EXT;
930 931 932
			if (!desc->more) {
				desc->more = mmu_alloc_pte_list_desc(vcpu);
				desc = desc->more;
933
				desc->spte_count = 0;
934 935
				break;
			}
936 937
			desc = desc->more;
		}
938 939
		count += desc->spte_count;
		desc->sptes[desc->spte_count++] = spte;
940
	}
941
	return count;
942 943
}

944
static void
945 946 947
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
			   struct pte_list_desc *desc, int i,
			   struct pte_list_desc *prev_desc)
948
{
949
	int j = desc->spte_count - 1;
950

A
Avi Kivity 已提交
951 952
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
953 954
	desc->spte_count--;
	if (desc->spte_count)
955 956
		return;
	if (!prev_desc && !desc->more)
957
		rmap_head->val = 0;
958 959 960 961
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
962
			rmap_head->val = (unsigned long)desc->more | 1;
963
	mmu_free_pte_list_desc(desc);
964 965
}

966
static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
967
{
968 969
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;
970 971
	int i;

972
	if (!rmap_head->val) {
973
		pr_err("%s: %p 0->BUG\n", __func__, spte);
974
		BUG();
975
	} else if (!(rmap_head->val & 1)) {
976
		rmap_printk("%p 1->0\n", spte);
977
		if ((u64 *)rmap_head->val != spte) {
978
			pr_err("%s:  %p 1->BUG\n", __func__, spte);
979 980
			BUG();
		}
981
		rmap_head->val = 0;
982
	} else {
983
		rmap_printk("%p many->many\n", spte);
984
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
985 986
		prev_desc = NULL;
		while (desc) {
987
			for (i = 0; i < desc->spte_count; ++i) {
A
Avi Kivity 已提交
988
				if (desc->sptes[i] == spte) {
989 990
					pte_list_desc_remove_entry(rmap_head,
							desc, i, prev_desc);
991 992
					return;
				}
993
			}
994 995 996
			prev_desc = desc;
			desc = desc->more;
		}
997
		pr_err("%s: %p many->many\n", __func__, spte);
998 999 1000 1001
		BUG();
	}
}

1002 1003
static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			    u64 *sptep)
1004
{
1005
	mmu_spte_clear_track_bits(kvm, sptep);
1006 1007 1008
	__pte_list_remove(sptep, rmap_head);
}

P
Peter Xu 已提交
1009
/* Return true if rmap existed, false otherwise */
1010
static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
P
Peter Xu 已提交
1011 1012 1013 1014 1015 1016 1017 1018
{
	struct pte_list_desc *desc, *next;
	int i;

	if (!rmap_head->val)
		return false;

	if (!(rmap_head->val & 1)) {
1019
		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
P
Peter Xu 已提交
1020 1021 1022 1023 1024 1025 1026
		goto out;
	}

	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);

	for (; desc; desc = next) {
		for (i = 0; i < desc->spte_count; i++)
1027
			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
P
Peter Xu 已提交
1028 1029 1030 1031 1032 1033 1034 1035 1036
		next = desc->more;
		mmu_free_pte_list_desc(desc);
	}
out:
	/* rmap_head is meaningless now, remember to reset it */
	rmap_head->val = 0;
	return true;
}

1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	unsigned int count = 0;

	if (!rmap_head->val)
		return 0;
	else if (!(rmap_head->val & 1))
		return 1;

	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);

	while (desc) {
		count += desc->spte_count;
		desc = desc->more;
	}

	return count;
}

1057 1058
static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
					 const struct kvm_memory_slot *slot)
1059
{
1060
	unsigned long idx;
1061

1062
	idx = gfn_to_index(gfn, slot->base_gfn, level);
1063
	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1064 1065
}

1066 1067
static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
1068
	struct kvm_mmu_memory_cache *mc;
1069

1070
	mc = &vcpu->arch.mmu_pte_list_desc_cache;
1071
	return kvm_mmu_memory_cache_nr_free_objects(mc);
1072 1073
}

1074 1075
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
1076
	struct kvm_memory_slot *slot;
1077
	struct kvm_mmu_page *sp;
1078
	struct kvm_rmap_head *rmap_head;
1079

1080
	sp = sptep_to_sp(spte);
1081
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1082
	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1083
	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1084
	return pte_list_add(vcpu, spte, rmap_head);
1085 1086
}

1087

1088 1089
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
1090 1091
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
1092 1093
	struct kvm_mmu_page *sp;
	gfn_t gfn;
1094
	struct kvm_rmap_head *rmap_head;
1095

1096
	sp = sptep_to_sp(spte);
1097
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1098 1099 1100 1101 1102 1103 1104 1105 1106

	/*
	 * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
	 * context of a vCPU so have to determine which memslots to use based
	 * on context information in sp->role.
	 */
	slots = kvm_memslots_for_spte_role(kvm, sp->role);

	slot = __gfn_to_memslot(slots, gfn);
1107
	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1108

1109
	__pte_list_remove(spte, rmap_head);
1110 1111
}

1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124
/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap.  All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
	/* private fields */
	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
	int pos;			/* index of the sptep */
};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
M
Miaohe Lin 已提交
1125
 * information in the iterator may not be valid.
1126 1127 1128
 *
 * Returns sptep if found, NULL otherwise.
 */
1129 1130
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
			   struct rmap_iterator *iter)
1131
{
1132 1133
	u64 *sptep;

1134
	if (!rmap_head->val)
1135 1136
		return NULL;

1137
	if (!(rmap_head->val & 1)) {
1138
		iter->desc = NULL;
1139 1140
		sptep = (u64 *)rmap_head->val;
		goto out;
1141 1142
	}

1143
	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1144
	iter->pos = 0;
1145 1146 1147 1148
	sptep = iter->desc->sptes[iter->pos];
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
1149 1150 1151 1152 1153 1154 1155 1156 1157
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
1158 1159
	u64 *sptep;

1160 1161 1162 1163 1164
	if (iter->desc) {
		if (iter->pos < PTE_LIST_EXT - 1) {
			++iter->pos;
			sptep = iter->desc->sptes[iter->pos];
			if (sptep)
1165
				goto out;
1166 1167 1168 1169 1170 1171 1172
		}

		iter->desc = iter->desc->more;

		if (iter->desc) {
			iter->pos = 0;
			/* desc->sptes[0] cannot be NULL */
1173 1174
			sptep = iter->desc->sptes[iter->pos];
			goto out;
1175 1176 1177 1178
		}
	}

	return NULL;
1179 1180 1181
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
1182 1183
}

1184 1185
#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
1186
	     _spte_; _spte_ = rmap_get_next(_iter_))
1187

1188
static void drop_spte(struct kvm *kvm, u64 *sptep)
1189
{
1190
	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1191 1192

	if (is_shadow_present_pte(old_spte))
1193
		rmap_remove(kvm, sptep);
A
Avi Kivity 已提交
1194 1195
}

1196 1197 1198 1199

static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
	if (is_large_pte(*sptep)) {
1200
		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1201 1202 1203 1204 1205 1206 1207 1208 1209
		drop_spte(kvm, sptep);
		return true;
	}

	return false;
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
1210
	if (__drop_large_spte(vcpu->kvm, sptep)) {
1211
		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1212 1213 1214 1215

		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
	}
1216 1217 1218
}

/*
1219
 * Write-protect on the specified @sptep, @pt_protect indicates whether
1220
 * spte write-protection is caused by protecting shadow page table.
1221
 *
T
Tiejun Chen 已提交
1222
 * Note: write protection is difference between dirty logging and spte
1223 1224 1225 1226 1227
 * protection:
 * - for dirty logging, the spte can be set to writable at anytime if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be writable only after unsync-ing
 *   shadow page.
1228
 *
1229
 * Return true if tlb need be flushed.
1230
 */
1231
static bool spte_write_protect(u64 *sptep, bool pt_protect)
1232 1233 1234
{
	u64 spte = *sptep;

1235
	if (!is_writable_pte(spte) &&
1236
	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1237 1238
		return false;

1239
	rmap_printk("spte %p %llx\n", sptep, *sptep);
1240

1241
	if (pt_protect)
1242
		spte &= ~shadow_mmu_writable_mask;
1243
	spte = spte & ~PT_WRITABLE_MASK;
1244

1245
	return mmu_spte_update(sptep, spte);
1246 1247
}

1248 1249
static bool __rmap_write_protect(struct kvm *kvm,
				 struct kvm_rmap_head *rmap_head,
1250
				 bool pt_protect)
1251
{
1252 1253
	u64 *sptep;
	struct rmap_iterator iter;
1254
	bool flush = false;
1255

1256
	for_each_rmap_spte(rmap_head, &iter, sptep)
1257
		flush |= spte_write_protect(sptep, pt_protect);
1258

1259
	return flush;
1260 1261
}

1262
static bool spte_clear_dirty(u64 *sptep)
1263 1264 1265
{
	u64 spte = *sptep;

1266
	rmap_printk("spte %p %llx\n", sptep, *sptep);
1267

1268
	MMU_WARN_ON(!spte_ad_enabled(spte));
1269 1270 1271 1272
	spte &= ~shadow_dirty_mask;
	return mmu_spte_update(sptep, spte);
}

1273
static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1274 1275 1276
{
	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
					       (unsigned long *)sptep);
1277
	if (was_writable && !spte_ad_enabled(*sptep))
1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288
		kvm_set_pfn_dirty(spte_to_pfn(*sptep));

	return was_writable;
}

/*
 * Gets the GFN ready for another round of dirty logging by clearing the
 *	- D bit on ad-enabled SPTEs, and
 *	- W bit on ad-disabled SPTEs.
 * Returns true iff any D or W bits were cleared.
 */
1289
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1290
			       const struct kvm_memory_slot *slot)
1291 1292 1293 1294 1295
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

1296
	for_each_rmap_spte(rmap_head, &iter, sptep)
1297 1298
		if (spte_ad_need_write_protect(*sptep))
			flush |= spte_wrprot_for_clear_dirty(sptep);
1299
		else
1300
			flush |= spte_clear_dirty(sptep);
1301 1302 1303 1304

	return flush;
}

1305
/**
1306
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1307 1308 1309 1310 1311
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
1312
 * Used when we do not need to care about huge page mappings.
1313
 */
1314
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1315 1316
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
1317
{
1318
	struct kvm_rmap_head *rmap_head;
1319

1320
	if (is_tdp_mmu_enabled(kvm))
1321 1322
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
				slot->base_gfn + gfn_offset, mask, true);
1323 1324 1325 1326

	if (!kvm_memslots_have_rmaps(kvm))
		return;

1327
	while (mask) {
1328 1329
		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					PG_LEVEL_4K, slot);
1330
		__rmap_write_protect(kvm, rmap_head, false);
M
Marcelo Tosatti 已提交
1331

1332 1333 1334
		/* clear the first set bit */
		mask &= mask - 1;
	}
1335 1336
}

1337
/**
1338 1339
 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
 * protect the page if the D-bit isn't supported.
1340 1341 1342 1343 1344 1345 1346
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should clear D-bit
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
1347 1348 1349
static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
					 struct kvm_memory_slot *slot,
					 gfn_t gfn_offset, unsigned long mask)
1350
{
1351
	struct kvm_rmap_head *rmap_head;
1352

1353
	if (is_tdp_mmu_enabled(kvm))
1354 1355
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
				slot->base_gfn + gfn_offset, mask, false);
1356 1357 1358 1359

	if (!kvm_memslots_have_rmaps(kvm))
		return;

1360
	while (mask) {
1361 1362
		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					PG_LEVEL_4K, slot);
1363
		__rmap_clear_dirty(kvm, rmap_head, slot);
1364 1365 1366 1367 1368 1369

		/* clear the first set bit */
		mask &= mask - 1;
	}
}

1370 1371 1372 1373 1374 1375 1376
/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
1377 1378
 * We need to care about huge page mappings: e.g. during dirty logging we may
 * have such mappings.
1379 1380 1381 1382 1383
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406
	/*
	 * Huge pages are NOT write protected when we start dirty logging in
	 * initially-all-set mode; must write protect them here so that they
	 * are split to 4K on the first write.
	 *
	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
	 * of memslot has no such restriction, so the range can cross two large
	 * pages.
	 */
	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);

		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);

		/* Cross two large pages? */
		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
						       PG_LEVEL_2M);
	}

	/* Now handle 4K PTEs.  */
1407 1408
	if (kvm_x86_ops.cpu_dirty_log_size)
		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1409 1410
	else
		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1411 1412
}

1413 1414
int kvm_cpu_dirty_log_size(void)
{
1415
	return kvm_x86_ops.cpu_dirty_log_size;
1416 1417
}

1418
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1419 1420
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level)
1421
{
1422
	struct kvm_rmap_head *rmap_head;
1423
	int i;
1424
	bool write_protected = false;
1425

1426 1427
	if (kvm_memslots_have_rmaps(kvm)) {
		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1428
			rmap_head = gfn_to_rmap(gfn, i, slot);
1429 1430
			write_protected |= __rmap_write_protect(kvm, rmap_head, true);
		}
1431 1432
	}

1433
	if (is_tdp_mmu_enabled(kvm))
1434
		write_protected |=
1435
			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1436

1437
	return write_protected;
1438 1439
}

1440 1441 1442 1443 1444
static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1445
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1446 1447
}

1448
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1449
			  const struct kvm_memory_slot *slot)
1450
{
1451
	return pte_list_destroy(kvm, rmap_head);
1452 1453
}

1454 1455 1456
static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			    struct kvm_memory_slot *slot, gfn_t gfn, int level,
			    pte_t unused)
1457
{
1458
	return kvm_zap_rmapp(kvm, rmap_head, slot);
1459 1460
}

1461 1462 1463
static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			      struct kvm_memory_slot *slot, gfn_t gfn, int level,
			      pte_t pte)
1464
{
1465 1466
	u64 *sptep;
	struct rmap_iterator iter;
1467
	int need_flush = 0;
1468
	u64 new_spte;
D
Dan Williams 已提交
1469
	kvm_pfn_t new_pfn;
1470

1471 1472
	WARN_ON(pte_huge(pte));
	new_pfn = pte_pfn(pte);
1473

1474
restart:
1475
	for_each_rmap_spte(rmap_head, &iter, sptep) {
1476
		rmap_printk("spte %p %llx gfn %llx (%d)\n",
1477
			    sptep, *sptep, gfn, level);
1478

1479
		need_flush = 1;
1480

1481
		if (pte_write(pte)) {
1482
			pte_list_remove(kvm, rmap_head, sptep);
1483
			goto restart;
1484
		} else {
1485 1486
			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
					*sptep, new_pfn);
1487

1488
			mmu_spte_clear_track_bits(kvm, sptep);
1489
			mmu_spte_set(sptep, new_spte);
1490 1491
		}
	}
1492

1493 1494 1495 1496 1497
	if (need_flush && kvm_available_flush_tlb_with_range()) {
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
		return 0;
	}

1498
	return need_flush;
1499 1500
}

1501 1502
struct slot_rmap_walk_iterator {
	/* input fields. */
1503
	const struct kvm_memory_slot *slot;
1504 1505 1506 1507 1508 1509 1510
	gfn_t start_gfn;
	gfn_t end_gfn;
	int start_level;
	int end_level;

	/* output fields. */
	gfn_t gfn;
1511
	struct kvm_rmap_head *rmap;
1512 1513 1514
	int level;

	/* private field. */
1515
	struct kvm_rmap_head *end_rmap;
1516 1517 1518 1519 1520 1521 1522
};

static void
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{
	iterator->level = level;
	iterator->gfn = iterator->start_gfn;
1523 1524
	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1525 1526 1527 1528
}

static void
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1529
		    const struct kvm_memory_slot *slot, int start_level,
1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567
		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
{
	iterator->slot = slot;
	iterator->start_level = start_level;
	iterator->end_level = end_level;
	iterator->start_gfn = start_gfn;
	iterator->end_gfn = end_gfn;

	rmap_walk_init_level(iterator, iterator->start_level);
}

static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{
	return !!iterator->rmap;
}

static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
	if (++iterator->rmap <= iterator->end_rmap) {
		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
		return;
	}

	if (++iterator->level > iterator->end_level) {
		iterator->rmap = NULL;
		return;
	}

	rmap_walk_init_level(iterator, iterator->level);
}

#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
	   _start_gfn, _end_gfn, _iter_)				\
	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
				 _end_level_, _start_gfn, _end_gfn);	\
	     slot_rmap_walk_okay(_iter_);				\
	     slot_rmap_walk_next(_iter_))

1568 1569 1570
typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			       struct kvm_memory_slot *slot, gfn_t gfn,
			       int level, pte_t pte);
1571

1572 1573 1574
static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
						 struct kvm_gfn_range *range,
						 rmap_handler_t handler)
1575
{
1576
	struct slot_rmap_walk_iterator iterator;
1577
	bool ret = false;
1578

1579 1580 1581 1582
	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
				 range->start, range->end - 1, &iterator)
		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
			       iterator.level, range->pte);
1583

1584
	return ret;
1585 1586
}

1587
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1588
{
1589
	bool flush = false;
1590

1591 1592
	if (kvm_memslots_have_rmaps(kvm))
		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
1593

1594
	if (is_tdp_mmu_enabled(kvm))
1595
		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1596

1597
	return flush;
1598 1599
}

1600
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1601
{
1602
	bool flush = false;
1603

1604 1605
	if (kvm_memslots_have_rmaps(kvm))
		flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
1606

1607
	if (is_tdp_mmu_enabled(kvm))
1608
		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
1609

1610
	return flush;
1611 1612
}

1613 1614 1615
static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			  struct kvm_memory_slot *slot, gfn_t gfn, int level,
			  pte_t unused)
1616
{
1617
	u64 *sptep;
1618
	struct rmap_iterator iter;
1619 1620
	int young = 0;

1621 1622
	for_each_rmap_spte(rmap_head, &iter, sptep)
		young |= mmu_spte_age(sptep);
1623

1624 1625 1626
	return young;
}

1627 1628 1629
static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			       struct kvm_memory_slot *slot, gfn_t gfn,
			       int level, pte_t unused)
A
Andrea Arcangeli 已提交
1630
{
1631 1632
	u64 *sptep;
	struct rmap_iterator iter;
A
Andrea Arcangeli 已提交
1633

1634 1635 1636 1637
	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (is_accessed_spte(*sptep))
			return 1;
	return 0;
A
Andrea Arcangeli 已提交
1638 1639
}

1640 1641
#define RMAP_RECYCLE_THRESHOLD 1000

1642
static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1643
{
1644
	struct kvm_memory_slot *slot;
1645
	struct kvm_rmap_head *rmap_head;
1646 1647
	struct kvm_mmu_page *sp;

1648
	sp = sptep_to_sp(spte);
1649
	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1650
	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1651

1652
	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
1653 1654
	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
1655 1656
}

1657
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1658
{
1659
	bool young = false;
1660

1661 1662
	if (kvm_memslots_have_rmaps(kvm))
		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
1663

1664
	if (is_tdp_mmu_enabled(kvm))
1665
		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1666 1667

	return young;
1668 1669
}

1670
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
A
Andrea Arcangeli 已提交
1671
{
1672
	bool young = false;
1673

1674 1675
	if (kvm_memslots_have_rmaps(kvm))
		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
1676

1677
	if (is_tdp_mmu_enabled(kvm))
1678
		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1679 1680

	return young;
A
Andrea Arcangeli 已提交
1681 1682
}

1683
#ifdef MMU_DEBUG
1684
static int is_empty_shadow_page(u64 *spt)
A
Avi Kivity 已提交
1685
{
1686 1687 1688
	u64 *pos;
	u64 *end;

1689
	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1690
		if (is_shadow_present_pte(*pos)) {
1691
			printk(KERN_ERR "%s: %p %llx\n", __func__,
1692
			       pos, *pos);
A
Avi Kivity 已提交
1693
			return 0;
1694
		}
A
Avi Kivity 已提交
1695 1696
	return 1;
}
1697
#endif
A
Avi Kivity 已提交
1698

1699 1700 1701 1702 1703 1704
/*
 * This value is the sum of all of the kvm instances's
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster
 */
1705
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
1706 1707 1708 1709 1710
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

1711
static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1712
{
1713
	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1714
	hlist_del(&sp->hash_link);
1715 1716
	list_del(&sp->link);
	free_page((unsigned long)sp->spt);
1717 1718
	if (!sp->role.direct)
		free_page((unsigned long)sp->gfns);
1719
	kmem_cache_free(mmu_page_header_cache, sp);
1720 1721
}

1722 1723
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
1724
	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1725 1726
}

1727
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1728
				    struct kvm_mmu_page *sp, u64 *parent_pte)
1729 1730 1731 1732
{
	if (!parent_pte)
		return;

1733
	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1734 1735
}

1736
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1737 1738
				       u64 *parent_pte)
{
1739
	__pte_list_remove(parent_pte, &sp->parent_ptes);
1740 1741
}

1742 1743 1744 1745
static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
1746
	mmu_spte_clear_no_track(parent_pte);
1747 1748
}

1749
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
M
Marcelo Tosatti 已提交
1750
{
1751
	struct kvm_mmu_page *sp;
1752

1753 1754
	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1755
	if (!direct)
1756
		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1757
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1758 1759 1760 1761 1762 1763

	/*
	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
	 * depends on valid pages being added to the head of the list.  See
	 * comments in kvm_zap_obsolete_pages().
	 */
1764
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1765 1766 1767
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
M
Marcelo Tosatti 已提交
1768 1769
}

1770
static void mark_unsync(u64 *spte);
1771
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1772
{
1773 1774 1775 1776 1777 1778
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
		mark_unsync(sptep);
	}
1779 1780
}

1781
static void mark_unsync(u64 *spte)
1782
{
1783
	struct kvm_mmu_page *sp;
1784
	unsigned int index;
1785

1786
	sp = sptep_to_sp(spte);
1787 1788
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1789
		return;
1790
	if (sp->unsync_children++)
1791
		return;
1792
	kvm_mmu_mark_parents_unsync(sp);
1793 1794
}

1795
static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1796
			       struct kvm_mmu_page *sp)
1797
{
1798
	return 0;
1799 1800
}

1801 1802 1803 1804 1805 1806 1807 1808 1809 1810
#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

1811 1812
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
1813
{
1814
	int i;
1815

1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826
	if (sp->unsync)
		for (i=0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

1827 1828 1829 1830 1831 1832 1833
static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
	--sp->unsync_children;
	WARN_ON((int)sp->unsync_children < 0);
	__clear_bit(idx, sp->unsync_child_bitmap);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
			clear_unsync_child_bit(sp, i);
			continue;
		}

		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret) {
				clear_unsync_child_bit(sp, i);
				continue;
			} else if (ret > 0) {
				nr_unsync_leaf += ret;
			} else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			clear_unsync_child_bit(sp, i);
	}

	return nr_unsync_leaf;
}

#define INVALID_INDEX (-1)

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	pvec->nr = 0;
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, INVALID_INDEX);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

#define for_each_valid_sp(_kvm, _sp, _list)				\
	hlist_for_each_entry(_sp, _list, hash_link)			\
		if (is_obsolete_sp((_kvm), (_sp))) {			\
		} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_valid_sp(_kvm, _sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{
	if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return false;
	}

	return true;
}

static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
					struct list_head *invalid_list,
					bool remote_flush)
{
	if (!remote_flush && list_empty(invalid_list))
		return false;

	if (!list_empty(invalid_list))
		kvm_mmu_commit_zap_page(kvm, invalid_list);
	else
		kvm_flush_remote_tlbs(kvm);
	return true;
}

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	return sp->role.invalid ||
	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
	unsigned int idx[PT64_ROOT_MAX_LEVEL];
};

#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_first(&pvec, &parents);	\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;
		unsigned idx = pvec->page[n].idx;
		int level = sp->role.level;

		parents->idx[level-1] = idx;
		if (level == PG_LEVEL_4K)
			break;

		parents->parent[level-2] = sp;
	}

	return n;
}

static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	int level;

	if (pvec->nr == 0)
		return 0;

	WARN_ON(pvec->page[0].idx != INVALID_INDEX);

	sp = pvec->page[0].sp;
	level = sp->role.level;
	WARN_ON(level == PG_LEVEL_4K);

	parents->parent[level-2] = sp;

	/* Also set up a sentinel.  Further entries in pvec are all
	 * children of sp, so this element is never overwritten.
	 */
	parents->parent[level-1] = NULL;
	return mmu_pages_next(pvec, parents, 0);
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];
		sp = parents->parent[level];
		if (!sp)
			return;

		WARN_ON(idx == INVALID_INDEX);
		clear_unsync_child_bit(sp, idx);
		level++;
	} while (!sp->unsync_children);
}

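/*
 * Bring all unsync children of @parent back in sync: write-protect their
 * gfns, sync (or zap) each page and clear the parents' unsync bits.  If
 * @can_yield, mmu_lock may be dropped to reschedule; otherwise, when a
 * resched is needed, KVM_REQ_MMU_SYNC is requested and -EINTR is returned.
 */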
static int mmu_sync_children(struct kvm_vcpu *vcpu,
			     struct kvm_mmu_page *parent, bool can_yield)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);

	while (mmu_unsync_walk(parent, &pages)) {
		bool protected = false;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu, sp->gfn);

		if (protected) {
			kvm_flush_remote_tlbs(vcpu->kvm);
		}

		for_each_sp(pages, sp, parents, i) {
			kvm_unlink_unsync_page(vcpu->kvm, sp);
			kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		}
		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false);
			if (!can_yield) {
				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
				return -EINTR;
			}

			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
		}
	}

	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, false);
	return 0;
}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	atomic_set(&sp->write_flooding_count,  0);
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	__clear_sp_write_flooding_count(sptep_to_sp(spte));
}

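/*
 * Find an existing shadow page for @gfn with the wanted role in the page
 * hash, or allocate and account a new one on a miss.  Stale unsync pages
 * encountered along the way are synced or zapped.
 */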
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int direct,
					     unsigned int access)
{
	bool direct_mmu = vcpu->arch.mmu->direct_map;
	union kvm_mmu_page_role role;
	struct hlist_head *sp_list;
	unsigned quadrant;
	struct kvm_mmu_page *sp;
	int collisions = 0;
	LIST_HEAD(invalid_list);

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = direct;
	if (role.direct)
		role.gpte_is_8_bytes = true;
	role.access = access;
	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}

	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
		}

		if (sp->role.word != role.word) {
			/*
			 * If the guest is creating an upper-level page, zap
			 * unsync pages for the same gfn.  While it's possible
			 * the guest is using recursive page tables, in all
			 * likelihood the guest has stopped using the unsync
			 * page and is installing a completely unrelated page.
			 * Unsync pages must not be left as is, because the new
			 * upper-level page will be write-protected.
			 */
			if (level > PG_LEVEL_4K && sp->unsync)
				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							 &invalid_list);
			continue;
		}

		if (direct_mmu)
			goto trace_get_page;

		if (sp->unsync) {
			/*
			 * The page is good, but is stale.  kvm_sync_page does
			 * get the latest guest state, but (unlike mmu_unsync_children)
			 * it doesn't write-protect the page or mark it synchronized!
			 * This way the validity of the mapping is ensured, but the
			 * overhead of write protection is not incurred until the
			 * guest invalidates the TLB mapping.  This allows multiple
			 * SPs for a single gfn to be unsync.
			 *
			 * If the sync fails, the page is zapped.  If so, break
			 * in order to rebuild it.
			 */
			if (!kvm_sync_page(vcpu, sp, &invalid_list))
				break;

			WARN_ON(!list_empty(&invalid_list));
		}

		__clear_sp_write_flooding_count(sp);

trace_get_page:
		trace_kvm_mmu_get_page(sp, false);
		goto out;
	}

	++vcpu->kvm->stat.mmu_cache_miss;

	sp = kvm_mmu_alloc_page(vcpu, direct);

	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, sp_list);
	if (!direct) {
		account_shadowed(vcpu->kvm, sp);
		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
	}
	trace_kvm_mmu_get_page(sp, true);
out:
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
	return sp;
}

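/*
 * Initialize an iterator for walking the shadow page tables from @root for
 * @addr; with a PAE root the walk starts at the pae_root entry that covers
 * the address.
 */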
static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = root;
	iterator->level = vcpu->arch.mmu->shadow_root_level;

	if (iterator->level == PT64_ROOT_4LEVEL &&
	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
	    !vcpu->arch.mmu->direct_map)
		--iterator->level;

	if (iterator->level == PT32E_ROOT_LEVEL) {
		/*
		 * prev_root is currently only used for 64-bit hosts. So only
		 * the active root_hpa is valid here.
		 */
		BUG_ON(root != vcpu->arch.mmu->root_hpa);

		iterator->shadow_addr
			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
				    addr);
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PG_LEVEL_4K)
		return false;

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
{
	if (is_last_spte(spte, iterator->level)) {
		iterator->level = 0;
		return;
	}

	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	__shadow_walk_next(iterator, *iterator->sptep);
}

static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
{
	u64 spte;

	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);

	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));

	mmu_spte_set(sptep, spte);

	mmu_page_add_parent_pte(vcpu, sp, sptep);

	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);
}

static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

		drop_parent_pte(child, sptep);
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
	}
}

/* Returns the number of zapped non-leaf child shadow pages. */
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			    u64 *spte, struct list_head *invalid_list)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level)) {
			drop_spte(kvm, spte);
		} else {
			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, spte);

			/*
			 * Recursively zap nested TDP SPs, parentless SPs are
			 * unlikely to be used again in the near future.  This
			 * avoids retaining a large number of stale nested SPs.
			 */
			if (tdp_enabled && invalid_list &&
			    child->role.guest_mode && !child->parent_ptes.val)
				return kvm_mmu_prepare_zap_page(kvm, child,
								invalid_list);
		}
	} else if (is_mmio_spte(pte)) {
		mmu_spte_clear_no_track(spte);
	}
	return 0;
}

static int kvm_mmu_page_unlink_children(struct kvm *kvm,
					struct kvm_mmu_page *sp,
					struct list_head *invalid_list)
{
	int zapped = 0;
	unsigned i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);

	return zapped;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
		drop_parent_pte(sp, sptep);
}

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
{
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	if (parent->role.level == PG_LEVEL_4K)
		return 0;

	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
			mmu_pages_clear_parents(&parents);
			zapped++;
		}
	}

	return zapped;
}

static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
				       struct kvm_mmu_page *sp,
				       struct list_head *invalid_list,
				       int *nr_zapped)
{
	bool list_unstable;

	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
	kvm_mmu_unlink_parents(kvm, sp);

	/* Zapping children means active_mmu_pages has become unstable. */
	list_unstable = *nr_zapped;

	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp);

	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		/* Count self */
		(*nr_zapped)++;

		/*
		 * Already invalid pages (previously active roots) are not on
		 * the active page list.  See list_del() in the "else" case of
		 * !sp->root_count.
		 */
		if (sp->role.invalid)
			list_add(&sp->link, invalid_list);
		else
			list_move(&sp->link, invalid_list);
		kvm_mod_used_mmu_pages(kvm, -1);
	} else {
		/*
		 * Remove the active root from the active page list, the root
		 * will be explicitly freed when the root_count hits zero.
		 */
		list_del(&sp->link);

		/*
		 * Obsolete pages cannot be used on any vCPUs, see the comment
		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
		 * treats invalid shadow pages as being obsolete.
		 */
		if (!is_obsolete_sp(kvm, sp))
			kvm_reload_remote_mmus(kvm);
	}

	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	sp->role.invalid = 1;
	return list_unstable;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list)
{
	int nr_zapped;

	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
	return nr_zapped;
}

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp, *nsp;

	if (list_empty(invalid_list))
		return;

	/*
	 * We need to make sure everyone sees our modifications to
	 * the page tables and see changes to vcpu->mode here. The barrier
	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
	 *
	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
	 * guest mode and/or lockless shadow page table walks.
	 */
	kvm_flush_remote_tlbs(kvm);

	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_free_page(sp);
	}
}

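/*
 * Zap up to @nr_to_zap pages, starting from the tail (oldest entries) of
 * active_mmu_pages and skipping pages that are in use as roots.  Returns
 * the number of pages actually zapped.
 */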
static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
						  unsigned long nr_to_zap)
{
	unsigned long total_zapped = 0;
	struct kvm_mmu_page *sp, *tmp;
	LIST_HEAD(invalid_list);
	bool unstable;
	int nr_zapped;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return 0;

restart:
	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
		/*
		 * Don't zap active root pages, the page itself can't be freed
		 * and zapping it will just force vCPUs to realloc and reload.
		 */
		if (sp->root_count)
			continue;

		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
						      &nr_zapped);
		total_zapped += nr_zapped;
		if (total_zapped >= nr_to_zap)
			break;

		if (unstable)
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	kvm->stat.mmu_recycled += total_zapped;
	return total_zapped;
}

static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);

	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
		return 0;

	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);

	/*
	 * Note, this check is intentionally soft, it only guarantees that one
	 * page is available, while the caller may end up allocating as many as
	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
	 * exceeding the (arbitrary by default) limit will not harm the host,
	 * being too aggressive may unnecessarily kill the guest, and getting an
	 * exact count is far more trouble than it's worth, especially in the
	 * page fault paths.
	 */
2503 2504 2505 2506 2507
	if (!kvm_mmu_available_pages(vcpu->kvm))
		return -ENOSPC;
	return 0;
}

/*
 * Changing the number of mmu pages allocated to the vm
 * Note: if goal_nr_mmu_pages is too small, you will get deadlock
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
	write_lock(&kvm->mmu_lock);

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
						  goal_nr_mmu_pages);

		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;

	write_unlock(&kvm->mmu_lock);
}

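/*
 * Zap all indirect (shadowing) pages for @gfn so that the guest can write
 * the frame without taking write-protection faults.  Returns 1 if at least
 * one page was zapped.
 */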
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	int r;

	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
	r = 0;
	write_lock(&kvm->mmu_lock);
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	write_unlock(&kvm->mmu_lock);

	return r;
}

static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (vcpu->arch.mmu->direct_map)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return r;
}

static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

/*
 * Attempt to unsync any shadow pages that can be reached by the specified gfn,
 * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
 * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
 * be write-protected.
 */
int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
{
	struct kvm_mmu_page *sp;
	bool locked = false;

	/*
	 * Force write-protection if the page is being tracked.  Note, the page
	 * track machinery is used to write-protect upper-level shadow pages,
	 * i.e. this guards the role.level == 4K assertion below!
	 */
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return -EPERM;

	/*
	 * The page is not write-tracked, mark existing shadow pages unsync
	 * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
	 * that case, KVM must complete emulation of the guest TLB flush before
	 * allowing shadow pages to become unsync (writable by the guest).
	 */
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
		if (!can_unsync)
			return -EPERM;

		if (sp->unsync)
			continue;

		/*
		 * TDP MMU page faults require an additional spinlock as they
		 * run with mmu_lock held for read, not write, and the unsync
		 * logic is not thread safe.  Take the spinlock regardless of
		 * the MMU type to avoid extra conditionals/parameters, there's
		 * no meaningful penalty if mmu_lock is held for write.
		 */
		if (!locked) {
			locked = true;
			spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);

			/*
			 * Recheck after taking the spinlock, a different vCPU
			 * may have since marked the page unsync.  A false
			 * positive on the unprotected check above is not
			 * possible as clearing sp->unsync _must_ hold mmu_lock
			 * for write, i.e. unsync cannot transition from 0->1
			 * while this CPU holds mmu_lock for read (or write).
			 */
			if (READ_ONCE(sp->unsync))
				continue;
		}

		WARN_ON(sp->role.level != PG_LEVEL_4K);
		kvm_unsync_page(vcpu, sp);
	}
	if (locked)
		spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);

	/*
	 * We need to ensure that the marking of unsync pages is visible
	 * before the SPTE is updated to allow writes because
	 * kvm_mmu_sync_roots() checks the unsync flags without holding
	 * the MMU lock and so can race with this. If the SPTE was updated
	 * before the page had been marked as unsync-ed, something like the
	 * following could happen:
	 *
	 * CPU 1                    CPU 2
	 * ---------------------------------------------------------------------
	 * 1.2 Host updates SPTE
	 *     to be writable
	 *                      2.1 Guest writes a GPTE for GVA X.
	 *                          (GPTE being in the guest page table shadowed
	 *                           by the SP from CPU 1.)
	 *                          This reads SPTE during the page table walk.
	 *                          Since SPTE.W is read as 1, there is no
	 *                          fault.
	 *
	 *                      2.2 Guest issues TLB flush.
	 *                          That causes a VM Exit.
	 *
2656 2657
	 *                      2.3 Walking of unsync pages sees sp->unsync is
	 *                          false and skips the page.
2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672
	 *
	 *                      2.4 Guest accesses GVA X.
	 *                          Since the mapping in the SP was not updated,
	 *                          so the old mapping for GVA X incorrectly
	 *                          gets used.
	 * 1.1 Host marks SP
	 *     as unsync
	 *     (sp->unsync = true)
	 *
	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
	 * pairs with this write barrier.
	 */
	smp_wmb();

	return 0;
}

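/*
 * Build a leaf SPTE with make_spte() and install it, reporting via the
 * SET_SPTE_* flags whether the write was spurious or requires a remote
 * TLB flush.
 */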
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned int pte_access, int level,
		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
		    bool can_unsync, bool host_writable)
{
	u64 spte;
	struct kvm_mmu_page *sp;
	int ret;

	sp = sptep_to_sp(sptep);

	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
			can_unsync, host_writable, sp_ad_disabled(sp), &spte);

	if (spte & PT_WRITABLE_MASK)
		kvm_vcpu_mark_page_dirty(vcpu, gfn);

	if (*sptep == spte)
		ret |= SET_SPTE_SPURIOUS;
	else if (mmu_spte_update(sptep, spte))
		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;

	return ret;
}

static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			unsigned int pte_access, bool write_fault, int level,
			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
			bool host_writable)
{
	int was_rmapped = 0;
	int rmap_count;
	int set_spte_ret;
	int ret = RET_PF_FIXED;
	bool flush = false;

	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);

	if (unlikely(is_noslot_pfn(pfn))) {
		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
		return RET_PF_EMULATE;
	}

	if (is_shadow_present_pte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;
			u64 pte = *sptep;

			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, sptep);
			flush = true;
		} else if (pfn != spte_to_pfn(*sptep)) {
			pgprintk("hfn old %llx new %llx\n",
				 spte_to_pfn(*sptep), pfn);
			drop_spte(vcpu->kvm, sptep);
			flush = true;
		} else
			was_rmapped = 1;
	}

	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
				speculative, true, host_writable);
	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write_fault)
			ret = RET_PF_EMULATE;
	}

	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
				KVM_PAGES_PER_HPAGE(level));

	/*
	 * The fault is fully spurious if and only if the new SPTE and old SPTE
	 * are identical, and emulation is not required.
	 */
	if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
		WARN_ON_ONCE(!was_rmapped);
		return RET_PF_SPURIOUS;
	}

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	trace_kvm_mmu_set_spte(level, gfn, sptep);

	if (!was_rmapped) {
		kvm_update_page_stats(vcpu->kvm, level, 1);
		rmap_count = rmap_add(vcpu, sptep, gfn);
		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
			rmap_recycle(vcpu, sptep, gfn);
	}

	return ret;
}

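/*
 * Speculative prefetch of the PTEs surrounding a faulting direct SPTE: map
 * up to PTE_PREFETCH_NUM neighbouring pages in one go when their struct
 * pages can be grabbed atomically.
 */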
static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
				     bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
	if (!slot)
		return KVM_PFN_ERR_FAULT;

	return gfn_to_pfn_memslot_atomic(slot, gfn);
}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
	struct kvm_memory_slot *slot;
	unsigned int access = sp->role.access;
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
	if (!slot)
		return -1;

	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
	if (ret <= 0)
		return -1;

	for (i = 0; i < ret; i++, gfn++, start++) {
		mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);
		put_page(pages[i]);
	}

	return 0;
}

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte) || spte == sptep) {
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				return;
			start = NULL;
		} else if (!start)
			start = spte;
	}
	if (start)
		direct_pte_prefetch_many(vcpu, sp, start, spte);
}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

	sp = sptep_to_sp(sptep);

	/*
	 * Without accessed bits, there's no way to distinguish between
	 * actually accessed translations and prefetched, so disable pte
	 * prefetch if accessed bits aren't available.
	 */
	if (sp_ad_disabled(sp))
		return;

	if (sp->role.level > PG_LEVEL_4K)
		return;

	/*
	 * If addresses are being invalidated, skip prefetching to avoid
	 * accidentally prefetching those addresses.
	 */
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}

static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
				  const struct kvm_memory_slot *slot)
{
	unsigned long hva;
	pte_t *pte;
	int level;

	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
		return PG_LEVEL_4K;

	/*
	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
	 * is not solely for performance, it's also necessary to avoid the
	 * "writable" check in __gfn_to_hva_many(), which will always fail on
	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
	 * page fault steps have already verified the guest isn't writing a
	 * read-only memslot.
	 */
	hva = __gfn_to_hva_memslot(slot, gfn);

	pte = lookup_address_in_mm(kvm->mm, hva, &level);
	if (unlikely(!pte))
		return PG_LEVEL_4K;

	return level;
}

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      kvm_pfn_t pfn, int max_level)
{
	struct kvm_lpage_info *linfo;
	int host_level;

	max_level = min(max_level, max_huge_page_level);
	for ( ; max_level > PG_LEVEL_4K; max_level--) {
		linfo = lpage_info_slot(gfn, slot, max_level);
		if (!linfo->disallow_lpage)
			break;
	}

	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
	return min(host_level, max_level);
}

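/*
 * Pick the largest mapping level allowed for @gfn, bounded by the fault's
 * max_level, the memslot's disallow_lpage tracking and the host mapping
 * level, then align *pfnp to that level.
 */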
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level)
{
	struct kvm_memory_slot *slot;
	kvm_pfn_t pfn = *pfnp;
	kvm_pfn_t mask;
	int level;

	*req_level = PG_LEVEL_4K;

	if (unlikely(max_level == PG_LEVEL_4K))
		return PG_LEVEL_4K;

	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
		return PG_LEVEL_4K;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
	if (!slot)
		return PG_LEVEL_4K;

	/*
	 * Enforce the iTLB multihit workaround after capturing the requested
	 * level, which will be used to do precise, accurate accounting.
	 */
	*req_level = level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
	if (level == PG_LEVEL_4K || huge_page_disallowed)
		return PG_LEVEL_4K;

	/*
	 * mmu_notifier_retry() was successful and mmu_lock is held, so
	 * the pmd can't be split from under us.
	 */
	mask = KVM_PAGES_PER_HPAGE(level) - 1;
	VM_BUG_ON((gfn & mask) != (pfn & mask));
	*pfnp = pfn & ~mask;

	return level;
}

void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp)
{
	int level = *goal_levelp;

	if (cur_level == level && level > PG_LEVEL_4K &&
	    is_shadow_present_pte(spte) &&
	    !is_large_pte(spte)) {
		/*
		 * A small SPTE exists for this pfn, but FNAME(fetch)
		 * and __direct_map would like to create a large PTE
		 * instead: just force them to go down another level,
		 * patching back for them into pfn the next 9 bits of
		 * the address.
		 */
		u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
				KVM_PAGES_PER_HPAGE(level - 1);
		*pfnp |= gfn & page_mask;
		(*goal_levelp)--;
	}
}

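/*
 * Walk the shadow page table for @gpa, allocating non-leaf shadow pages as
 * needed, then install the final leaf SPTE with mmu_set_spte() and prefetch
 * neighbouring PTEs.
 */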
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
			int map_writable, int max_level, kvm_pfn_t pfn,
			bool prefault, bool is_tdp)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_shadow_walk_iterator it;
	struct kvm_mmu_page *sp;
	int level, req_level, ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	gfn_t base_gfn = gfn;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);
	for_each_shadow_entry(vcpu, gpa, it) {
		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
						   &pfn, &level);

		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
			break;

		drop_large_spte(vcpu, it.sptep);
		if (is_shadow_present_pte(*it.sptep))
			continue;

		sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
				      it.level - 1, true, ACC_ALL);

		link_shadow_page(vcpu, it.sptep, sp);
		if (is_tdp && huge_page_disallowed &&
		    req_level >= it.level)
			account_huge_nx_page(vcpu->kvm, sp);
	}

	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
			   write, level, base_gfn, pfn, prefault,
			   map_writable);
	if (ret == RET_PF_SPURIOUS)
		return ret;

	direct_pte_prefetch(vcpu, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;
}

static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
}

static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
{
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte, otherwise a read access on the readonly gfn can
	 * also cause an mmio page fault and be treated as mmio access.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
		return RET_PF_EMULATE;

	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
		return RET_PF_RETRY;
	}

	return -EFAULT;
}

static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
				kvm_pfn_t pfn, unsigned int access,
				int *ret_val)
{
	/* The pfn is invalid, report the error! */
	if (unlikely(is_error_pfn(pfn))) {
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
		return true;
	}

	if (unlikely(is_noslot_pfn(pfn))) {
		vcpu_cache_mmio_info(vcpu, gva, gfn,
				     access & shadow_mmio_access_mask);
		/*
		 * If MMIO caching is disabled, emulate immediately without
		 * touching the shadow page tables as attempting to install an
		 * MMIO SPTE will just be an expensive nop.
		 */
		if (unlikely(!shadow_mmio_value)) {
			*ret_val = RET_PF_EMULATE;
			return true;
		}
	}

	return false;
}

static bool page_fault_can_be_fast(u32 error_code)
{
	/*
	 * Do not fix the mmio spte with invalid generation number which
	 * need to be updated by slow page fault path.
	 */
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	/* See if the page fault is due to an NX violation */
	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
		return false;

	/*
	 * #PF can be fast if:
	 * 1. The shadow page table entry is not present, which could mean that
	 *    the fault is potentially caused by access tracking (if enabled).
	 * 2. The shadow page table entry is present and the fault
	 *    is caused by write-protect, that means we just need change the W
	 *    bit of the spte which can be done out of mmu-lock.
	 *
	 * However, if access tracking is disabled we know that a non-present
	 * page must be a genuine page fault where we have to create a new SPTE.
	 * So, if access tracking is disabled, we return true only for write
	 * accesses to a present page.
3106 3107
	 */

3108 3109 3110
	return shadow_acc_track_mask != 0 ||
	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3111 3112
}

3113 3114 3115 3116
/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
3117
static bool
3118
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3119
			u64 *sptep, u64 old_spte, u64 new_spte)
3120 3121 3122 3123 3124
{
	gfn_t gfn;

	WARN_ON(!sp->role.direct);

3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136
	/*
	 * Theoretically we could also set dirty bit (and flush TLB) here in
	 * order to eliminate unnecessary PML logging. See comments in
	 * set_spte. But fast_page_fault is very unlikely to happen with PML
	 * enabled, so we do not do this. This might result in the same GPA
	 * to be logged in PML buffer again when the write really happens, and
	 * eventually to be called by mark_page_dirty twice. But it's also no
	 * harm. This also avoids the TLB flush needed after setting dirty bit
	 * so non-PML cases won't be impacted.
	 *
	 * Compare with set_spte where instead shadow_dirty_mask is set.
	 */
3137
	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3138 3139
		return false;

3140
	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3141 3142 3143 3144 3145 3146 3147
		/*
		 * The gfn of direct spte is stable since it is
		 * calculated by sp->gfn.
		 */
		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}
3148 3149 3150 3151

	return true;
}

static bool is_access_allowed(u32 fault_err_code, u64 spte)
{
	if (fault_err_code & PFERR_FETCH_MASK)
		return is_executable_pte(spte);

	if (fault_err_code & PFERR_WRITE_MASK)
		return is_writable_pte(spte);

	/* Fault was on Read access */
	return spte & PT_PRESENT_MASK;
}

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-preset. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between walk_shadow_page_lockless_{begin,end}.
 *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
 */
static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 old_spte;
	u64 *sptep = NULL;

	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
		sptep = iterator.sptep;
		*spte = old_spte;

		if (!is_shadow_present_pte(old_spte))
			break;
	}

	return sptep;
}

3190
/*
3191
 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3192
 */
3193
static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
3194
{
3195
	struct kvm_mmu_page *sp;
3196
	int ret = RET_PF_INVALID;
3197
	u64 spte = 0ull;
3198
	u64 *sptep = NULL;
3199
	uint retry_count = 0;
3200

3201
	if (!page_fault_can_be_fast(error_code))
3202
		return ret;
3203 3204 3205

	walk_shadow_page_lockless_begin(vcpu);

3206
	do {
3207
		u64 new_spte;
3208

3209 3210 3211 3212
		if (is_tdp_mmu(vcpu->arch.mmu))
			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, gpa, &spte);
		else
			sptep = fast_pf_get_last_sptep(vcpu, gpa, &spte);
3213

3214 3215 3216
		if (!is_shadow_present_pte(spte))
			break;

3217
		sp = sptep_to_sp(sptep);
3218 3219
		if (!is_last_spte(spte, sp->role.level))
			break;
3220

3221
		/*
3222 3223 3224 3225 3226
		 * Check whether the memory access that caused the fault would
		 * still cause it if it were to be performed right now. If not,
		 * then this is a spurious fault caused by TLB lazily flushed,
		 * or some other CPU has already fixed the PTE after the
		 * current CPU took the fault.
3227 3228 3229 3230
		 *
		 * Need not check the access of upper level table entries since
		 * they are always ACC_ALL.
		 */
3231
		if (is_access_allowed(error_code, spte)) {
3232
			ret = RET_PF_SPURIOUS;
3233 3234
			break;
		}
3235

3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246
		new_spte = spte;

		if (is_access_track_spte(spte))
			new_spte = restore_acc_track_spte(new_spte);

		/*
		 * Currently, to simplify the code, write-protection can
		 * be removed in the fast path only if the SPTE was
		 * write-protected for dirty-logging or access tracking.
		 */
		if ((error_code & PFERR_WRITE_MASK) &&
3247
		    spte_can_locklessly_be_made_writable(spte)) {
3248
			new_spte |= PT_WRITABLE_MASK;
3249 3250

			/*
3251 3252 3253 3254 3255 3256 3257 3258 3259
			 * Do not fix write-permission on the large spte.  Since
			 * we only dirty the first page into the dirty-bitmap in
			 * fast_pf_fix_direct_spte(), other pages are missed
			 * if its slot has dirty logging enabled.
			 *
			 * Instead, we let the slow page fault path create a
			 * normal spte to fix the access.
			 *
			 * See the comments in kvm_arch_commit_memory_region().
3260
			 */
3261
			if (sp->role.level > PG_LEVEL_4K)
3262
				break;
3263
		}
3264

3265
		/* Verify that the fault can be handled in the fast path */
3266 3267
		if (new_spte == spte ||
		    !is_access_allowed(error_code, new_spte))
3268 3269 3270 3271 3272
			break;

		/*
		 * Currently, fast page fault only works for direct mapping
		 * since the gfn is not stable for indirect shadow page. See
3273
		 * Documentation/virt/kvm/locking.rst to get more detail.
3274
		 */
3275
		if (fast_pf_fix_direct_spte(vcpu, sp, sptep, spte, new_spte)) {
3276
			ret = RET_PF_FIXED;
3277
			break;
3278
		}
3279 3280 3281 3282 3283 3284 3285 3286

		if (++retry_count > 4) {
			printk_once(KERN_WARNING
				"kvm: Fast #PF retrying more than 4 times.\n");
			break;
		}

	} while (true);
3287

3288
	trace_fast_page_fault(vcpu, gpa, error_code, sptep, spte, ret);
3289 3290
	walk_shadow_page_lockless_end(vcpu);

3291
	return ret;
3292 3293
}

3294 3295
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
3296
{
3297
	struct kvm_mmu_page *sp;
3298

3299
	if (!VALID_PAGE(*root_hpa))
		return;
3301

3302
	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
3303

3304
	if (is_tdp_mmu_page(sp))
3305
		kvm_tdp_mmu_put_root(kvm, sp, false);
3306 3307
	else if (!--sp->root_count && sp->role.invalid)
		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3308

3309 3310 3311
	*root_hpa = INVALID_PAGE;
}

3312
/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3313 3314
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free)
3315
{
3316
	struct kvm *kvm = vcpu->kvm;
3317 3318
	int i;
	LIST_HEAD(invalid_list);
3319
	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3320

3321
	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3322

3323
	/* Before acquiring the MMU lock, see if we need to do any real work. */
3324 3325 3326 3327 3328 3329 3330 3331 3332
	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
			    VALID_PAGE(mmu->prev_roots[i].hpa))
				break;

		if (i == KVM_MMU_NUM_PREV_ROOTS)
			return;
	}
3333

3334
	write_lock(&kvm->mmu_lock);
3335

3336 3337
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3338
			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3339
					   &invalid_list);
3340

3341 3342 3343
	if (free_active_root) {
		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
3344
			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
3345
		} else if (mmu->pae_root) {
3346 3347 3348 3349 3350 3351 3352 3353
			for (i = 0; i < 4; ++i) {
				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
					continue;

				mmu_free_root_page(kvm, &mmu->pae_root[i],
						   &invalid_list);
				mmu->pae_root[i] = INVALID_PAE_ROOT;
			}
3354
		}
3355
		mmu->root_hpa = INVALID_PAGE;
3356
		mmu->root_pgd = 0;
3357
	}
3358

3359
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
3360
	write_unlock(&kvm->mmu_lock);
3361
}
3362
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3363

void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	unsigned long roots_to_free = 0;
	hpa_t root_hpa;
	int i;

	/*
	 * This should not be called while L2 is active, L2 can't invalidate
	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
	 */
	WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		root_hpa = mmu->prev_roots[i].hpa;
		if (!VALID_PAGE(root_hpa))
			continue;

		if (!to_shadow_page(root_hpa) ||
			to_shadow_page(root_hpa)->role.guest_mode)
			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
	}

	kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);


3391 3392 3393 3394
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

3395
	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3396
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3397 3398 3399 3400 3401 3402
		ret = 1;
	}

	return ret;
}

3403 3404
static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
			    u8 level, bool direct)
3405 3406
{
	struct kvm_mmu_page *sp;
3407 3408 3409 3410 3411 3412 3413 3414 3415

	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
	++sp->root_count;

	return __pa(sp->spt);
}

static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
3416 3417
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	u8 shadow_root_level = mmu->shadow_root_level;
3418
	hpa_t root;
3419
	unsigned i;
3420 3421 3422 3423 3424 3425
	int r;

	write_lock(&vcpu->kvm->mmu_lock);
	r = make_mmu_pages_available(vcpu);
	if (r < 0)
		goto out_unlock;
3426

3427
	if (is_tdp_mmu_enabled(vcpu->kvm)) {
3428
		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3429
		mmu->root_hpa = root;
3430
	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3431
		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3432
		mmu->root_hpa = root;
3433
	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3434 3435 3436 3437
		if (WARN_ON_ONCE(!mmu->pae_root)) {
			r = -EIO;
			goto out_unlock;
		}
3438

3439
		for (i = 0; i < 4; ++i) {
3440
			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3441

3442 3443
			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
					      i << 30, PT32_ROOT_LEVEL, true);
3444 3445
			mmu->pae_root[i] = root | PT_PRESENT_MASK |
					   shadow_me_mask;
3446
		}
3447
		mmu->root_hpa = __pa(mmu->pae_root);
3448 3449
	} else {
		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3450 3451
		r = -EIO;
		goto out_unlock;
3452
	}
3453

3454
	/* root_pgd is ignored for direct MMUs. */
3455
	mmu->root_pgd = 0;
3456 3457 3458
out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
	return r;
3459 3460 3461
}

static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3462
{
3463
	struct kvm_mmu *mmu = vcpu->arch.mmu;
3464
	u64 pdptrs[4], pm_mask;
3465
	gfn_t root_gfn, root_pgd;
3466
	hpa_t root;
3467 3468
	unsigned i;
	int r;
3469

3470
	root_pgd = mmu->get_guest_pgd(vcpu);
3471
	root_gfn = root_pgd >> PAGE_SHIFT;
3472

3473 3474 3475
	if (mmu_check_root(vcpu, root_gfn))
		return 1;

3476 3477 3478 3479
	/*
	 * On SVM, reading PDPTRs might access guest memory, which might fault
	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
	 */
3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490
	if (mmu->root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			pdptrs[i] = mmu->get_pdptr(vcpu, i);
			if (!(pdptrs[i] & PT_PRESENT_MASK))
				continue;

			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
				return 1;
		}
	}

3491 3492 3493 3494
	r = alloc_all_memslots_rmaps(vcpu->kvm);
	if (r)
		return r;

3495 3496 3497 3498 3499
	write_lock(&vcpu->kvm->mmu_lock);
	r = make_mmu_pages_available(vcpu);
	if (r < 0)
		goto out_unlock;

3500 3501 3502 3503
	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
3504
	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
3505
		root = mmu_alloc_root(vcpu, root_gfn, 0,
3506 3507
				      mmu->shadow_root_level, false);
		mmu->root_hpa = root;
3508
		goto set_root_pgd;
3509
	}
3510

3511 3512 3513 3514
	if (WARN_ON_ONCE(!mmu->pae_root)) {
		r = -EIO;
		goto out_unlock;
	}
3515

3516 3517
	/*
	 * We shadow a 32 bit page table. This may be a legacy 2-level
3518 3519
	 * or a PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
3520
	 */
3521
	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3522
	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
3523 3524
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

3525
		if (WARN_ON_ONCE(!mmu->pml4_root)) {
3526 3527 3528
			r = -EIO;
			goto out_unlock;
		}
3529
		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3530 3531 3532 3533 3534 3535 3536 3537

		if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
			if (WARN_ON_ONCE(!mmu->pml5_root)) {
				r = -EIO;
				goto out_unlock;
			}
			mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
		}
3538 3539
	}

3540
	for (i = 0; i < 4; ++i) {
3541
		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3542

3543
		if (mmu->root_level == PT32E_ROOT_LEVEL) {
3544
			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3545
				mmu->pae_root[i] = INVALID_PAE_ROOT;
				continue;
			}
3548
			root_gfn = pdptrs[i] >> PAGE_SHIFT;
3549
		}
3550

3551 3552
		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, false);
3553
		mmu->pae_root[i] = root | pm_mask;
3554
	}
3555

3556 3557 3558
	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
		mmu->root_hpa = __pa(mmu->pml5_root);
	else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
3559
		mmu->root_hpa = __pa(mmu->pml4_root);
3560 3561
	else
		mmu->root_hpa = __pa(mmu->pae_root);
3562

3563
set_root_pgd:
3564
	mmu->root_pgd = root_pgd;
3565 3566
out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
3567

3568
	return 0;
3569 3570
}

static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	bool need_pml5 = mmu->shadow_root_level > PT64_ROOT_4LEVEL;
	u64 *pml5_root = NULL;
	u64 *pml4_root = NULL;
	u64 *pae_root;

	/*
	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
	 * tables are allocated and initialized at root creation as there is no
	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
	 */
	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
		return 0;

	/*
	 * NPT, the only paging mode that uses this horror, uses a fixed number
	 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
	 * all MMUs are 5-level.  Thus, this can safely require that pml5_root
	 * is allocated if the other roots are valid and pml5 is needed, as any
	 * prior MMU would also have required pml5.
	 */
	if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
		return 0;

	/*
	 * The special roots should always be allocated in concert.  Yell and
	 * bail if KVM ends up in a state where only one of the roots is valid.
	 */
	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
			 (need_pml5 && mmu->pml5_root)))
		return -EIO;

	/*
	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
	 * doesn't need to be decrypted.
	 */
	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!pae_root)
		return -ENOMEM;

#ifdef CONFIG_X86_64
	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!pml4_root)
		goto err_pml4;

	if (need_pml5) {
		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!pml5_root)
			goto err_pml5;
	}
#endif

	mmu->pae_root = pae_root;
	mmu->pml4_root = pml4_root;
	mmu->pml5_root = pml5_root;

	return 0;

#ifdef CONFIG_X86_64
err_pml5:
	free_page((unsigned long)pml4_root);
err_pml4:
	free_page((unsigned long)pae_root);
	return -ENOMEM;
#endif
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (vcpu->arch.mmu->direct_map)
		return;

	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		return;

	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);

	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		hpa_t root = vcpu->arch.mmu->root_hpa;
		sp = to_shadow_page(root);

		/*
		 * Even if another CPU was marking the SP as unsync-ed
		 * simultaneously, any guest page table changes are not
		 * guaranteed to be visible anyway until this VCPU issues a TLB
		 * flush strictly after those changes are made. We only need to
		 * ensure that the other CPU sets these flags before any actual
		 * changes to the page tables are made. The comments in
		 * mmu_try_to_unsync_pages() describe what could go wrong if
		 * this requirement isn't satisfied.
		 */
		if (!smp_load_acquire(&sp->unsync) &&
		    !smp_load_acquire(&sp->unsync_children))
			return;

		write_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

		mmu_sync_children(vcpu, sp, true);

		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
		write_unlock(&vcpu->kvm->mmu_lock);
		return;
	}

	write_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu->pae_root[i];

		if (IS_VALID_PAE_ROOT(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = to_shadow_page(root);
			mmu_sync_children(vcpu, sp, true);
		}
	}

	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
	write_unlock(&vcpu->kvm->mmu_lock);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
				  u32 access, struct x86_exception *exception)
{
	if (exception)
		exception->error_code = 0;
	return vaddr;
}

static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
					 u32 access,
					 struct x86_exception *exception)
{
	if (exception)
		exception->error_code = 0;
	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
}

static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	/*
	 * A nested guest cannot use the MMIO cache if it is using nested
	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
	 */
	if (mmu_is_nested(vcpu))
		return false;

	if (direct)
		return vcpu_match_mmio_gpa(vcpu, addr);

	return vcpu_match_mmio_gva(vcpu, addr);
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between walk_shadow_page_lockless_{begin,end}.
 */
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
{
	struct kvm_shadow_walk_iterator iterator;
	int leaf = -1;
	u64 spte;

	for (shadow_walk_init(&iterator, vcpu, addr),
	     *root_level = iterator.level;
	     shadow_walk_okay(&iterator);
	     __shadow_walk_next(&iterator, spte)) {
		leaf = iterator.level;
		spte = mmu_spte_get_lockless(iterator.sptep);

		sptes[leaf] = spte;

		if (!is_shadow_present_pte(spte))
			break;
	}

	return leaf;
}

/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	struct rsvd_bits_validate *rsvd_check;
	int root, leaf, level;
	bool reserved = false;

	walk_shadow_page_lockless_begin(vcpu);

	if (is_tdp_mmu(vcpu->arch.mmu))
		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
	else
		leaf = get_walk(vcpu, addr, sptes, &root);

	walk_shadow_page_lockless_end(vcpu);

	if (unlikely(leaf < 0)) {
		*sptep = 0ull;
		return reserved;
	}

	*sptep = sptes[leaf];

	/*
	 * Skip reserved bits checks on the terminal leaf if it's not a valid
	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
	 * design, always have reserved bits set.  The purpose of the checks is
	 * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
	 */
	if (!is_shadow_present_pte(sptes[leaf]))
		leaf++;

	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;

	for (level = root; level >= leaf; level--)
		reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);

	if (reserved) {
		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
		       __func__, addr);
		for (level = root; level >= leaf; level--)
			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
			       sptes[level], level,
			       get_rsvd_bits(rsvd_check, sptes[level], level));
	}

	return reserved;
}

static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	u64 spte;
	bool reserved;

	if (mmio_info_in_cache(vcpu, addr, direct))
		return RET_PF_EMULATE;

	reserved = get_mmio_spte(vcpu, addr, &spte);
	if (WARN_ON(reserved))
		return -EINVAL;

	if (is_mmio_spte(spte)) {
		gfn_t gfn = get_mmio_spte_gfn(spte);
		unsigned int access = get_mmio_spte_access(spte);

		if (!check_mmio_spte(vcpu, spte))
			return RET_PF_INVALID;

		if (direct)
			addr = 0;

		trace_handle_mmio_page_fault(addr, gfn, access);
		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
		return RET_PF_EMULATE;
	}

	/*
	 * If the page table was zapped by another CPU, let the CPU fault again
	 * on the address.
	 */
	return RET_PF_RETRY;
}

static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
					 u32 error_code, gfn_t gfn)
{
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	if (!(error_code & PFERR_PRESENT_MASK) ||
	      !(error_code & PFERR_WRITE_MASK))
		return false;

	/*
	 * The guest is writing a page that is write-tracked, which cannot be
	 * fixed up by the page fault handler.
	 */
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;

	return false;
}

static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		clear_sp_write_flooding_count(iterator.sptep);
		if (!is_shadow_present_pte(spte))
			break;
	}
	walk_shadow_page_lockless_end(vcpu);
}

static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				    gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
	arch.gfn = gfn;
	arch.direct_map = vcpu->arch.mmu->direct_map;
	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);

	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}

static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
			 bool write, bool *writable, int *r)
{
	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	bool async;

	/*
	 * Retry the page fault if the gfn hit a memslot that is being deleted
	 * or moved.  This ensures any existing SPTEs for the old memslot will
	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
	 */
	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
		goto out_retry;

	if (!kvm_is_visible_memslot(slot)) {
		/* Don't expose private memslots to L2. */
		if (is_guest_mode(vcpu)) {
			*pfn = KVM_PFN_NOSLOT;
			*writable = false;
			return false;
		}
		/*
		 * If the APIC access page exists but is disabled, go directly
		 * to emulation without caching the MMIO access or creating a
		 * MMIO SPTE.  That way the cache doesn't need to be purged
		 * when the AVIC is re-enabled.
		 */
		if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
		    !kvm_apicv_activated(vcpu->kvm)) {
			*r = RET_PF_EMULATE;
			return true;
		}
	}

	async = false;
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async,
				    write, writable, hva);
	if (!async)
		return false; /* *pfn has correct page already */

	if (!prefault && kvm_can_do_async_pf(vcpu)) {
		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			goto out_retry;
		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
			goto out_retry;
	}

	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
				    write, writable, hva);

out_retry:
	*r = RET_PF_RETRY;
	return true;
}

static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
			     bool prefault, int max_level, bool is_tdp)
{
	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
	bool write = error_code & PFERR_WRITE_MASK;
	bool map_writable;

	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	hva_t hva;
	int r;

	if (page_fault_handle_page_track(vcpu, error_code, gfn))
		return RET_PF_EMULATE;

	r = fast_page_fault(vcpu, gpa, error_code);
	if (r != RET_PF_INVALID)
		return r;

	r = mmu_topup_memory_caches(vcpu, false);
	if (r)
		return r;

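	/*
	 * Snapshot the MMU notifier sequence count before resolving the pfn;
	 * mmu_notifier_retry_hva() below rechecks it under mmu_lock and the
	 * fault is retried if an invalidation ran in between.
	 */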
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (kvm_faultin_pfn(vcpu, prefault, gfn, gpa, &pfn, &hva,
			 write, &map_writable, &r))
		return r;

	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
		return r;

	r = RET_PF_RETRY;

	if (is_tdp_mmu_fault)
		read_lock(&vcpu->kvm->mmu_lock);
	else
		write_lock(&vcpu->kvm->mmu_lock);

	if (!is_noslot_pfn(pfn) && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, hva))
		goto out_unlock;
	r = make_mmu_pages_available(vcpu);
	if (r)
		goto out_unlock;

	if (is_tdp_mmu_fault)
		r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
				    pfn, prefault);
	else
		r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
				 prefault, is_tdp);

out_unlock:
	if (is_tdp_mmu_fault)
		read_unlock(&vcpu->kvm->mmu_lock);
	else
		write_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return r;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
				u32 error_code, bool prefault)
{
	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);

	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
				 PG_LEVEL_2M, false);
}

int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len)
{
	int r = 1;
	u32 flags = vcpu->arch.apf.host_apf_flags;

#ifndef CONFIG_X86_64
	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
	if (WARN_ON_ONCE(fault_address >> 32))
		return -EFAULT;
#endif

	vcpu->arch.l1tf_flush_l1d = true;
	if (!flags) {
		trace_kvm_page_fault(fault_address, error_code);

		if (kvm_event_needs_reinjection(vcpu))
			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
				insn_len);
	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		vcpu->arch.apf.host_apf_flags = 0;
		local_irq_disable();
		kvm_async_pf_task_wait_schedule(fault_address);
		local_irq_enable();
	} else {
		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		       bool prefault)
{
	int max_level;

	for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
	     max_level > PG_LEVEL_4K;
	     max_level--) {
		int page_num = KVM_PAGES_PER_HPAGE(max_level);
		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);

		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
			break;
	}

	return direct_page_fault(vcpu, gpa, error_code, prefault,
				 max_level, true);
}

static void nonpaging_init_context(struct kvm_mmu *context)
{
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = NULL;
	context->direct_map = true;
}

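/*
 * A cached root is reusable only if it tracks the same guest PGD (the PGD is
 * irrelevant for direct roots), still points at a valid shadow page, and was
 * created with exactly the same role.
 */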
static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
				  union kvm_mmu_page_role role)
{
	return (role.direct || pgd == root->pgd) &&
	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
	       role.word == to_shadow_page(root->hpa)->role.word;
}

/*
 * Find out if a previously cached root matching the new pgd/role is available.
 * The current root is also inserted into the cache.
 * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
 * returned.
 * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
 * false is returned. This root should now be freed by the caller.
 */
static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
				  union kvm_mmu_page_role new_role)
{
	uint i;
	struct kvm_mmu_root_info root;
	struct kvm_mmu *mmu = vcpu->arch.mmu;

	root.pgd = mmu->root_pgd;
	root.hpa = mmu->root_hpa;

	if (is_root_usable(&root, new_pgd, new_role))
		return true;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		swap(root, mmu->prev_roots[i]);

		if (is_root_usable(&root, new_pgd, new_role))
			break;
	}

	mmu->root_hpa = root.hpa;
	mmu->root_pgd = root.pgd;

	return i < KVM_MMU_NUM_PREV_ROOTS;
}

static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
			    union kvm_mmu_page_role new_role)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;

	/*
	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
	 * later if necessary.
	 */
	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
	    mmu->root_level >= PT64_ROOT_4LEVEL)
		return cached_root_available(vcpu, new_pgd, new_role);

	return false;
}

static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
			      union kvm_mmu_page_role new_role)
{
	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
		return;
	}

	/*
	 * It's possible that the cached previous root page is obsolete because
	 * of a change in the MMU generation number. However, changing the
	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
	 * free the root set here and allocate a new one.
	 */
	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);

	if (force_flush_and_sync_on_reuse) {
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/*
	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
	 * switching to a new CR3, that GVA->GPA mapping may no longer be
	 * valid. So clear any cached MMIO info even when we don't need to sync
	 * the shadow page tables.
	 */
	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);

	/*
	 * If this is a direct root page, it doesn't have a write flooding
	 * count. Otherwise, clear the write flooding count.
	 */
	if (!new_role.direct)
		__clear_sp_write_flooding_count(
				to_shadow_page(vcpu->arch.mmu->root_hpa));
}

void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
{
	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);

static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr3(vcpu);
}

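/*
 * Returns true if the SPTE is an MMIO SPTE that was handled here: it is
 * either zapped because the cached gfn no longer matches, or refreshed via
 * mark_mmio_spte().
 */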
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			   unsigned int access, int *nr_present)
{
	if (unlikely(is_mmio_spte(*sptep))) {
		if (gfn != get_mmio_spte_gfn(*sptep)) {
			mmu_spte_clear_no_track(sptep);
			return true;
		}

		(*nr_present)++;
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}

#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static void
__reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
			bool pse, bool amd)
{
	u64 gbpages_bit_rsvd = 0;
	u64 nonleaf_bit8_rsvd = 0;
	u64 high_bits_rsvd;

	rsvd_check->bad_mt_xwr = 0;

	if (!gbpages)
		gbpages_bit_rsvd = rsvd_bits(7, 7);

	if (level == PT32E_ROOT_LEVEL)
		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
	else
		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);

	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
	if (!nx)
		high_bits_rsvd |= rsvd_bits(63, 63);

	/*
	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
	 * leaf entries) on AMD CPUs only.
	 */
	if (amd)
		nonleaf_bit8_rsvd = rsvd_bits(8, 8);

	switch (level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
		rsvd_check->rsvd_bits_mask[0][1] = 0;
		rsvd_check->rsvd_bits_mask[0][0] = 0;
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];

		if (!pse) {
			rsvd_check->rsvd_bits_mask[1][1] = 0;
			break;
		}

		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32 bits PSE 4MB page */
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		break;
	case PT32E_ROOT_LEVEL:
		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
						   high_bits_rsvd |
						   rsvd_bits(5, 8) |
						   rsvd_bits(1, 2);	/* PDPTE */
		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
						   rsvd_bits(13, 20);	/* large page */
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
		break;
	case PT64_ROOT_5LEVEL:
		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
						   nonleaf_bit8_rsvd |
						   rsvd_bits(7, 7);
		rsvd_check->rsvd_bits_mask[1][4] =
			rsvd_check->rsvd_bits_mask[0][4];
		fallthrough;
	case PT64_ROOT_4LEVEL:
		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
						   nonleaf_bit8_rsvd |
						   rsvd_bits(7, 7);
		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
						   gbpages_bit_rsvd;
		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
		rsvd_check->rsvd_bits_mask[1][3] =
			rsvd_check->rsvd_bits_mask[0][3];
		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
						   gbpages_bit_rsvd |
						   rsvd_bits(13, 29);
		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
						   rsvd_bits(13, 20); /* large page */
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
		break;
	}
}

static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
{
	/*
	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
	 * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
	 * walk for performance and complexity reasons.  Not to mention KVM
	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
	 * KVM once a TDP translation is installed.  Mimic hardware behavior so
	 * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
	 */
	return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
			     guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
}

static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
				vcpu->arch.reserved_gpa_bits,
				context->root_level, is_efer_nx(context),
				guest_can_use_gbpages(vcpu),
				is_cr4_pse(context),
				guest_cpuid_is_amd_or_hygon(vcpu));
}

static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
			    u64 pa_bits_rsvd, bool execonly)
{
	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
	u64 bad_mt_xwr;

	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;

	/* large page */
	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];

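	/*
	 * bad_mt_xwr is a 64-bit bitmap indexed by the low 6 bits of an EPT
	 * entry (XWR permissions in bits 0..2, memory type in bits 3..5); a
	 * set bit marks that combination as illegal.
	 */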
	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
	if (!execonly) {
		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
	}
	rsvd_check->bad_mt_xwr = bad_mt_xwr;
}

static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
		struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
				    vcpu->arch.reserved_gpa_bits, execonly);
}

static inline u64 reserved_hpa_bits(void)
{
	return rsvd_bits(shadow_phys_bits, 63);
}

/*
 * The page table on the host is the shadow page table for the page table in
 * the guest or in an AMD nested guest, so its MMU features completely follow
 * the guest's features.
 */
static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
					struct kvm_mmu *context)
{
	/*
	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
	 * The iTLB multi-hit workaround can be toggled at any time, so assume
	 * NX can be used by any non-nested shadow MMU to avoid having to reset
	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
	 */
	bool uses_nx = is_efer_nx(context) || !tdp_enabled;

	/* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */
	bool is_amd = true;
	/* KVM doesn't use 2-level page tables for the shadow MMU. */
	bool is_pse = false;
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);

	shadow_zero_check = &context->shadow_zero_check;
	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
				context->shadow_root_level, uses_nx,
				guest_can_use_gbpages(vcpu), is_pse, is_amd);

	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}

}

static inline bool boot_cpu_is_amd(void)
{
	WARN_ON_ONCE(!tdp_enabled);
	return shadow_x_mask == 0;
}

/*
 * Reset the reserved-bit checks for the host's direct page table.  Use as
 * many MMU features as possible; however, KVM currently does not do
 * execution-protection here.
 */
static void
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context)
{
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	shadow_zero_check = &context->shadow_zero_check;

	if (boot_cpu_is_amd())
		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
					context->shadow_root_level, false,
					boot_cpu_has(X86_FEATURE_GBPAGES),
					false, true);
	else
		__reset_rsvds_bits_mask_ept(shadow_zero_check,
					    reserved_hpa_bits(), false);

	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}
}

/*
 * Same as reset_shadow_zero_bits_mask() above, except this is for the shadow
 * page table of an Intel nested guest.
 */
static void
reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
				    reserved_hpa_bits(), execonly);
}

#define BYTE_MASK(access) \
	((1 & (access) ? 2 : 0) | \
	 (2 & (access) ? 4 : 0) | \
	 (3 & (access) ? 8 : 0) | \
	 (4 & (access) ? 16 : 0) | \
	 (5 & (access) ? 32 : 0) | \
	 (6 & (access) ? 64 : 0) | \
	 (7 & (access) ? 128 : 0))

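/*
 * BYTE_MASK(ACC_*_MASK) expands one access bit into a byte-wide mask: bit n
 * of the result (1 <= n <= 7) is set iff the 3-bit permission combination n
 * includes that access bit.  For example, BYTE_MASK(ACC_EXEC_MASK) is 0xaa,
 * covering every combination that has the execute bit set.
 */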

static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
{
	unsigned byte;

	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
	const u8 u = BYTE_MASK(ACC_USER_MASK);

	bool cr4_smep = is_cr4_smep(mmu);
	bool cr4_smap = is_cr4_smap(mmu);
	bool cr0_wp = is_cr0_wp(mmu);
	bool efer_nx = is_efer_nx(mmu);

	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
		unsigned pfec = byte << 1;

		/*
		 * Each "*f" variable has a 1 bit for each UWX value
		 * that causes a fault with the given PFEC.
		 */

		/* Faults from writes to non-writable pages */
		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
		/* Faults from user mode accesses to supervisor pages */
		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
		/* Faults from fetches of non-executable pages */
		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
		/* Faults from kernel mode fetches of user pages */
		u8 smepf = 0;
		/* Faults from kernel mode accesses of user pages */
		u8 smapf = 0;

		if (!ept) {
			/* Faults from kernel mode accesses to user pages */
			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;

			/* Not really needed: !nx will cause pte.nx to fault */
			if (!efer_nx)
				ff = 0;

			/* Allow supervisor writes if !cr0.wp */
			if (!cr0_wp)
				wf = (pfec & PFERR_USER_MASK) ? wf : 0;

			/* Disallow supervisor fetches of user code if cr4.smep */
			if (cr4_smep)
				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;

			/*
			 * SMAP: kernel-mode data accesses from user-mode
			 * mappings should fault. A fault is considered
			 * as a SMAP violation if all of the following
			 * conditions are true:
			 *   - X86_CR4_SMAP is set in CR4
			 *   - A user page is accessed
			 *   - The access is not a fetch
			 *   - Page fault in kernel mode
			 *   - if CPL = 3 or X86_EFLAGS_AC is clear
			 *
			 * Here, we cover the first three conditions.
			 * The fourth is computed dynamically in permission_fault();
			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
			 * *not* subject to SMAP restrictions.
			 */
			if (cr4_smap)
				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
		}

		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
	}
}

/*
* PKU is an additional mechanism by which the paging controls access to
* user-mode addresses based on the value in the PKRU register.  Protection
* key violations are reported through a bit in the page fault error code.
* Unlike other bits of the error code, the PK bit is not known at the
* call site of e.g. gva_to_gpa; it must be computed directly in
* permission_fault based on two bits of PKRU, on some machine state (CR4,
* CR0, EFER, CPL), and on other bits of the error code and the page tables.
*
* In particular the following conditions come from the error code, the
* page tables and the machine state:
* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
* - PK is always zero if U=0 in the page tables
* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
*
* The PKRU bitmask caches the result of these four conditions.  The error
* code (minus the P bit) and the page table's U bit form an index into the
* PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
* with the two bits of the PKRU register corresponding to the protection key.
* For the first three conditions above the bits will be 00, thus masking
* away both AD and WD.  For all reads or if the last condition holds, WD
* only will be masked away.
*/
static void update_pkru_bitmask(struct kvm_mmu *mmu)
{
	unsigned bit;
	bool wp;

	if (!is_cr4_pke(mmu)) {
		mmu->pkru_mask = 0;
		return;
	}

	wp = is_cr0_wp(mmu);

	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
		unsigned pfec, pkey_bits;
		bool check_pkey, check_write, ff, uf, wf, pte_user;

		pfec = bit << 1;
		ff = pfec & PFERR_FETCH_MASK;
		uf = pfec & PFERR_USER_MASK;
		wf = pfec & PFERR_WRITE_MASK;

		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
		pte_user = pfec & PFERR_RSVD_MASK;

		/*
		 * Only need to check the access which is not an
		 * instruction fetch and is to a user page.
		 */
		check_pkey = (!ff && pte_user);
		/*
		 * write access is controlled by PKRU if it is a
		 * user access or CR0.WP = 1.
		 */
		check_write = check_pkey && wf && (uf || wp);

		/* PKRU.AD stops both read and write access. */
		pkey_bits = !!check_pkey;
		/* PKRU.WD stops write access. */
		pkey_bits |= (!!check_write) << 1;

		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
	}
}

static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
					struct kvm_mmu *mmu)
{
	if (!is_cr0_pg(mmu))
		return;

	reset_rsvds_bits_mask(vcpu, mmu);
	update_permission_bitmask(mmu, false);
	update_pkru_bitmask(mmu);
}

static void paging64_init_context(struct kvm_mmu *context)
{
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->direct_map = false;
}

static void paging32_init_context(struct kvm_mmu *context)
{
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->direct_map = false;
}

static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
							 struct kvm_mmu_role_regs *regs)
{
	union kvm_mmu_extended_role ext = {0};

	if (____is_cr0_pg(regs)) {
		ext.cr0_pg = 1;
		ext.cr4_pae = ____is_cr4_pae(regs);
		ext.cr4_smep = ____is_cr4_smep(regs);
		ext.cr4_smap = ____is_cr4_smap(regs);
		ext.cr4_pse = ____is_cr4_pse(regs);

		/* PKEY and LA57 are active iff long mode is active. */
		ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
		ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
	}

	ext.valid = 1;

	return ext;
}

static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
						   struct kvm_mmu_role_regs *regs,
						   bool base_only)
{
	union kvm_mmu_role role = {0};

	role.base.access = ACC_ALL;
	if (____is_cr0_pg(regs)) {
		role.base.efer_nx = ____is_efer_nx(regs);
		role.base.cr0_wp = ____is_cr0_wp(regs);
	}
	role.base.smm = is_smm(vcpu);
	role.base.guest_mode = is_guest_mode(vcpu);

	if (base_only)
		return role;

	role.ext = kvm_calc_mmu_role_ext(vcpu, regs);

	return role;
}

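/*
 * Note: 4-level paging already covers a 48-bit address space, so 5-level TDP
 * only buys anything when the guest's MAXPHYADDR exceeds 48 bits.
 */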
static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{
	/* tdp_root_level is architecture forced level, use it if nonzero */
	if (tdp_root_level)
		return tdp_root_level;

	/* Use 5-level TDP if and only if it's useful/necessary. */
	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
		return 4;

	return max_tdp_level;
}

static union kvm_mmu_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
				struct kvm_mmu_role_regs *regs, bool base_only)
{
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);

	role.base.ad_disabled = (shadow_accessed_mask == 0);
	role.base.level = kvm_mmu_get_tdp_level(vcpu);
	role.base.direct = true;
	role.base.gpte_is_8_bytes = true;

	return role;
}

static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
	union kvm_mmu_role new_role =
		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);

	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->mmu_role.as_u64 = new_role.as_u64;
	context->page_fault = kvm_tdp_page_fault;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = NULL;
	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
	context->direct_map = true;
	context->get_guest_pgd = get_cr3;
	context->get_pdptr = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
	context->root_level = role_regs_to_root_level(&regs);

	if (!is_cr0_pg(context))
		context->gva_to_gpa = nonpaging_gva_to_gpa;
	else if (is_cr4_pae(context))
		context->gva_to_gpa = paging64_gva_to_gpa;
	else
		context->gva_to_gpa = paging32_gva_to_gpa;

	reset_guest_paging_metadata(vcpu, context);
	reset_tdp_shadow_zero_bits_mask(vcpu, context);
}

static union kvm_mmu_role
kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
				      struct kvm_mmu_role_regs *regs, bool base_only)
{
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);

	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
	role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);

	return role;
}

static union kvm_mmu_role
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
				   struct kvm_mmu_role_regs *regs, bool base_only)
{
	union kvm_mmu_role role =
		kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);

	role.base.direct = !____is_cr0_pg(regs);

	if (!____is_efer_lma(regs))
		role.base.level = PT32E_ROOT_LEVEL;
	else if (____is_cr4_la57(regs))
		role.base.level = PT64_ROOT_5LEVEL;
	else
		role.base.level = PT64_ROOT_4LEVEL;

	return role;
}

static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
				    struct kvm_mmu_role_regs *regs,
				    union kvm_mmu_role new_role)
{
	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->mmu_role.as_u64 = new_role.as_u64;

	if (!is_cr0_pg(context))
		nonpaging_init_context(context);
	else if (is_cr4_pae(context))
		paging64_init_context(context);
	else
		paging32_init_context(context);
	context->root_level = role_regs_to_root_level(regs);

	reset_guest_paging_metadata(vcpu, context);
	context->shadow_root_level = new_role.base.level;

	reset_shadow_zero_bits_mask(vcpu, context);
}

static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
				struct kvm_mmu_role_regs *regs)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
	union kvm_mmu_role new_role =
		kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);

	shadow_mmu_init_context(vcpu, context, regs, new_role);
}

static union kvm_mmu_role
kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
				   struct kvm_mmu_role_regs *regs)
{
	union kvm_mmu_role role =
		kvm_calc_shadow_root_page_role_common(vcpu, regs, false);

	role.base.direct = false;
	role.base.level = kvm_mmu_get_tdp_level(vcpu);

	return role;
}

void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
{
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
	struct kvm_mmu_role_regs regs = {
		.cr0 = cr0,
		.cr4 = cr4,
		.efer = efer,
	};
	union kvm_mmu_role new_role;

	new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);

	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);

	shadow_mmu_init_context(vcpu, context, &regs, new_role);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);

static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
				   bool execonly, u8 level)
{
	union kvm_mmu_role role = {0};

	/* SMM flag is inherited from root_mmu */
	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;

	role.base.level = level;
	role.base.gpte_is_8_bytes = true;
	role.base.direct = false;
	role.base.ad_disabled = !accessed_dirty;
	role.base.guest_mode = true;
	role.base.access = ACC_ALL;

	/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
	role.ext.word = 0;
	role.ext.execonly = execonly;
	role.ext.valid = 1;

	return role;
}

void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp)
{
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
	u8 level = vmx_eptp_page_walk_level(new_eptp);
	union kvm_mmu_role new_role =
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
						   execonly, level);

	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);

	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->mmu_role.as_u64 = new_role.as_u64;

	context->shadow_root_level = level;

	context->ept_ad = accessed_dirty;
	context->page_fault = ept_page_fault;
	context->gva_to_gpa = ept_gva_to_gpa;
	context->sync_page = ept_sync_page;
	context->invlpg = ept_invlpg;
	context->root_level = level;
	context->direct_map = false;

	update_permission_bitmask(context, true);
	update_pkru_bitmask(context);
	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);

	kvm_init_shadow_mmu(vcpu, &regs);

	context->get_guest_pgd     = get_cr3;
	context->get_pdptr         = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
}

static union kvm_mmu_role
kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
{
	union kvm_mmu_role role;

	role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);

	/*
	 * Nested MMUs are used only for walking L2's gva->gpa, they never have
	 * shadow pages of their own and so "direct" has no meaning.   Set it
	 * to "true" to try to detect bogus usage of the nested MMU.
	 */
	role.base.direct = true;
	role.base.level = role_regs_to_root_level(regs);
	return role;
}

static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

	if (new_role.as_u64 == g_context->mmu_role.as_u64)
		return;

	g_context->mmu_role.as_u64 = new_role.as_u64;
	g_context->get_guest_pgd     = get_cr3;
	g_context->get_pdptr         = kvm_pdptr_read;
	g_context->inject_page_fault = kvm_inject_page_fault;
	g_context->root_level        = new_role.base.level;

	/*
	 * L2 page tables are never shadowed, so there is no need to sync
	 * SPTEs.
	 */
	g_context->invlpg            = NULL;

	/*
	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
	 * L1's nested page tables (e.g. EPT12). The nested translation
	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
	 * L2's page tables as the first level of translation and L1's
	 * nested page tables as the second level of translation. Basically
	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
	 */
	if (!is_paging(vcpu))
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
	else if (is_long_mode(vcpu))
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	else if (is_pae(vcpu))
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	else
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;

	reset_guest_paging_metadata(vcpu, g_context);
}

void kvm_init_mmu(struct kvm_vcpu *vcpu)
{
	if (mmu_is_nested(vcpu))
		init_kvm_nested_mmu(vcpu);
	else if (tdp_enabled)
		init_kvm_tdp_mmu(vcpu);
	else
		init_kvm_softmmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_mmu);

static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
	union kvm_mmu_role role;

	if (tdp_enabled)
		role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
	else
		role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);

	return role.base;
}

void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	/*
	 * Invalidate all MMU roles to force them to reinitialize as CPUID
	 * information is factored into reserved bit calculations.
	 */
	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
	kvm_mmu_reset_context(vcpu);

	/*
	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
	 * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
	 * sweep the problem under the rug.
	 *
	 * KVM's horrific CPUID ABI makes the problem all but impossible to
	 * solve, as correctly handling multiple vCPU models (with respect to
	 * paging and physical address properties) in a single VM would require
	 * tracking all relevant CPUID information in kvm_mmu_page_role.  That
	 * is very undesirable as it would double the memory requirements for
	 * gfn_track (see struct kvm_mmu_page_role comments), and in practice
	 * no sane VMM mucks with the core vCPU model on the fly.
	 */
	if (vcpu->arch.last_vmentry_cpu != -1) {
		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
	}
}

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	kvm_init_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
	if (r)
		goto out;
	r = mmu_alloc_special_roots(vcpu);
	if (r)
		goto out;
	if (vcpu->arch.mmu->direct_map)
		r = mmu_alloc_direct_roots(vcpu);
	else
		r = mmu_alloc_shadow_roots(vcpu);
	if (r)
		goto out;

	kvm_mmu_sync_roots(vcpu);

	kvm_mmu_load_pgd(vcpu);
	static_call(kvm_x86_tlb_flush_current)(vcpu);
out:
	return r;
}

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
}

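/*
 * Returns true if updating the SPTE from @old to @new requires a remote TLB
 * flush: the old SPTE was present and the new value either drops the mapping,
 * changes the physical address, or removes a permission (the NX bit is
 * inverted below so that setting NX counts as removing execute permission).
 */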
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= shadow_nx_mask;
	new ^= shadow_nx_mask;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    int *bytes)
{
	u64 gentry = 0;
	int r;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode, since we update the sptes only
	 * when they have the same mode.
	 */
	if (is_pae(vcpu) && *bytes == 4) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
	}

	if (*bytes == 4 || *bytes == 8) {
		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
		if (r)
			gentry = 0;
	}

	return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{
	/*
	 * Skip write-flooding detection for the sp whose level is 1, because
	 * it can become unsync, in which case the guest page is not
	 * write-protected.
	 */
	if (sp->role.level == PG_LEVEL_4K)
		return false;

	atomic_inc(&sp->write_flooding_count);
	return atomic_read(&sp->write_flooding_count) >= 3;
}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{
	unsigned offset, pte_size, misaligned;

	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
		 gpa, bytes, sp->role.word);

	offset = offset_in_page(gpa);
	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;

	/*
	 * Sometimes, the OS only writes the last byte to update status
	 * bits; for example, Linux uses the andb instruction in clear_bit().
	 */
	if (!(offset & (pte_size - 1)) && bytes == 1)
		return false;

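	/*
	 * The write is treated as misaligned if it crosses a pte_size-aligned
	 * boundary or covers fewer than 4 bytes.
	 */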
	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
	misaligned |= bytes < 4;

	return misaligned;
}

static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
	unsigned page_offset, quadrant;
	u64 *spte;
	int level;

	page_offset = offset_in_page(gpa);
	level = sp->role.level;
	*nspte = 1;
	if (!sp->role.gpte_is_8_bytes) {
		page_offset <<= 1;	/* 32->64 */
		/*
		 * A 32-bit pde maps 4MB while the shadow pdes map
		 * only 2MB.  So we need to double the offset again
		 * and zap two pdes instead of one.
		 */
		if (level == PT32_ROOT_LEVEL) {
			page_offset &= ~7; /* kill rounding error */
			page_offset <<= 1;
			*nspte = 2;
		}
		quadrant = page_offset >> PAGE_SHIFT;
		page_offset &= ~PAGE_MASK;
		if (quadrant != sp->role.quadrant)
			return NULL;
	}

	spte = &sp->spt[page_offset / sizeof(*spte)];
	return spte;
}

static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
			      const u8 *new, int bytes,
			      struct kvm_page_track_notifier_node *node)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	int npte;
	bool flush = false;

	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can exit simply.
	 */
	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
		return;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	/*
	 * No need to care whether the memory allocation succeeded or not,
	 * since pte prefetch is skipped if the cache does not have enough
	 * objects.
	 */
	mmu_topup_memory_caches(vcpu, true);

	write_lock(&vcpu->kvm->mmu_lock);

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);

	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);

	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
		if (detect_write_misaligned(sp, gpa, bytes) ||
		      detect_write_flooding(sp)) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}

		spte = get_written_sptes(sp, gpa, &npte);
		if (!spte)
			continue;

		while (npte--) {
			entry = *spte;
			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
			if (gentry && sp->role.level != PG_LEVEL_4K)
				++vcpu->kvm->stat.mmu_pde_zapped;
			if (need_remote_flush(entry, *spte))
				flush = true;
			++spte;
		}
	}
	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
	write_unlock(&vcpu->kvm->mmu_lock);
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len)
{
	int r, emulation_type = EMULTYPE_PF;
	bool direct = vcpu->arch.mmu->direct_map;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	r = RET_PF_INVALID;
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
		if (r == RET_PF_EMULATE)
			goto emulate;
	}

	if (r == RET_PF_INVALID) {
		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
					  lower_32_bits(error_code), false);
		if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
			return -EIO;
	}

	if (r < 0)
		return r;
	if (r != RET_PF_EMULATE)
		return 1;

	/*
	 * Before emulating the instruction, check if the error code
	 * was due to a RO violation while translating the guest page.
	 * This can occur when using nested virtualization with nested
	 * paging in both guests. If true, we simply unprotect the page
	 * and resume the guest.
	 */
	if (vcpu->arch.mmu->direct_map &&
	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
		return 1;
	}

	/*
	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
	 * optimistically try to just unprotect the page and let the processor
	 * re-execute the instruction that caused the page fault.  Do not allow
	 * retrying MMIO emulation, as it's not only pointless but could also
	 * cause us to enter an infinite loop because the processor will keep
	 * faulting on the non-existent MMIO address.  Retrying an instruction
	 * from a nested guest is also pointless and dangerous as we are only
	 * explicitly shadowing L1's page tables, i.e. unprotecting something
	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
	 */
	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
emulate:
	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
				       insn_len);
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gva_t gva, hpa_t root_hpa)
{
	int i;

	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
	if (mmu != &vcpu->arch.guest_mmu) {
		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
		if (is_noncanonical_address(gva, vcpu))
			return;

		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
	}

	if (!mmu->invlpg)
		return;

	if (root_hpa == INVALID_PAGE) {
		mmu->invlpg(vcpu, gva, mmu->root_hpa);

		/*
		 * INVLPG is required to invalidate any global mappings for the VA,
		 * irrespective of PCID.  Determining whether any of the prev_root
		 * mappings of the VA is marked global would take roughly as much
		 * work as just syncing it blindly, so we might as well always
		 * sync it.
		 *
		 * Mappings not reachable via the current cr3 or the prev_roots will be
		 * synced when switching to that cr3, so nothing needs to be done here
		 * for them.
		 */
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if (VALID_PAGE(mmu->prev_roots[i].hpa))
				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
	} else {
		mmu->invlpg(vcpu, gva, root_hpa);
	}
}

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

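/*
 * Handle INVPCID's single-address invalidation: invalidate @gva in the
 * current root and in any cached previous root that matches @pcid, and
 * flush the TLB entry for @gva if any root was touched.
 */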
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	bool tlb_flush = false;
	uint i;

	if (pcid == kvm_get_active_pcid(vcpu)) {
		mmu->invlpg(vcpu, gva, mmu->root_hpa);
		tlb_flush = true;
	}

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
			tlb_flush = true;
		}
	}

	if (tlb_flush)
		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);

	++vcpu->stat.invlpg;

	/*
	 * Mappings not reachable via the current cr3 or the prev_roots will be
	 * synced when switching to that cr3, so nothing needs to be done here
	 * for them.
	 */
}

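/*
 * Record the vendor module's TDP configuration: whether TDP is enabled, the
 * forced and maximum TDP root levels, and the largest page size TDP can use.
 */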
void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
		       int tdp_max_root_level, int tdp_huge_page_level)
{
	tdp_enabled = enable_tdp;
	tdp_root_level = tdp_forced_root_level;
	max_tdp_level = tdp_max_root_level;

	/*
	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
	 * the kernel is not.  But, KVM never creates a page size greater than
	 * what is used by the kernel for any given HVA, i.e. the kernel's
	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
	 */
	if (tdp_enabled)
		max_huge_page_level = tdp_huge_page_level;
	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
		max_huge_page_level = PG_LEVEL_1G;
	else
		max_huge_page_level = PG_LEVEL_2M;
}
EXPORT_SYMBOL_GPL(kvm_configure_mmu);

/* The return value indicates if a TLB flush on all vCPUs is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head,
				    const struct kvm_memory_slot *slot);

/* The caller should hold mmu-lock before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
			slot_level_handler fn, int start_level, int end_level,
			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
			bool flush)
{
	struct slot_rmap_walk_iterator iterator;

	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
			end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap, memslot);

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			if (flush && flush_on_yield) {
				kvm_flush_remote_tlbs_with_address(kvm,
						start_gfn,
						iterator.gfn - start_gfn + 1);
				flush = false;
			}
			cond_resched_rwlock_write(&kvm->mmu_lock);
		}
	}

	return flush;
}

static __always_inline bool
slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool flush_on_yield)
{
	return slot_handle_level_range(kvm, memslot, fn, start_level,
			end_level, memslot->base_gfn,
			memslot->base_gfn + memslot->npages - 1,
			flush_on_yield, false);
}

static __always_inline bool
slot_handle_leaf(struct kvm *kvm, const struct kvm_memory_slot *memslot,
		 slot_level_handler fn, bool flush_on_yield)
{
	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
				 PG_LEVEL_4K, flush_on_yield);
}

static void free_mmu_pages(struct kvm_mmu *mmu)
{
	if (!tdp_enabled && mmu->pae_root)
		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
	free_page((unsigned long)mmu->pae_root);
	free_page((unsigned long)mmu->pml4_root);
	free_page((unsigned long)mmu->pml5_root);
}

static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	struct page *page;
	int i;

	mmu->root_hpa = INVALID_PAGE;
	mmu->root_pgd = 0;
	mmu->translate_gpa = translate_gpa;
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;

	/*
	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
	 * while the PDP table is a per-vCPU construct that's allocated at MMU
	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
	 * x86_64.  Therefore we need to allocate the PDP table in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
	 * generally doesn't use PAE paging and can skip allocating the PDP
	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
	 */
	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
		return 0;

	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	mmu->pae_root = page_address(page);

	/*
	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
	 * that KVM's writes and the CPU's reads get along.  Note, this is
	 * only necessary when using shadow paging, as 64-bit NPT can get at
	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
	 */
	if (!tdp_enabled)
		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
	else
		WARN_ON_ONCE(shadow_me_mask);

	for (i = 0; i < 4; ++i)
		mmu->pae_root[i] = INVALID_PAE_ROOT;

	return 0;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;

	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
	if (ret)
		return ret;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
	if (ret)
		goto fail_allocate_root;

	return ret;
 fail_allocate_root:
	free_mmu_pages(&vcpu->arch.guest_mmu);
	return ret;
}

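/*
 * Zap obsolete shadow pages in batches of BATCH_ZAP_PAGES, dropping mmu_lock
 * between batches (via cond_resched_rwlock_write()) so other mmu_lock waiters
 * are not starved.
 */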
#define BATCH_ZAP_PAGES	10
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	int nr_zapped, batch = 0;

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		/*
		 * No obsolete valid page exists before a newly created page
		 * since active_mmu_pages is a FIFO list.
		 */
		if (!is_obsolete_sp(kvm, sp))
			break;

		/*
		 * Invalid pages should never land back on the list of active
		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
		 * infinite loop if the page gets put back on the list (again).
		 */
		if (WARN_ON(sp->role.invalid))
			continue;

		/*
		 * No need to flush the TLB since we're only zapping shadow
		 * pages with an obsolete generation number and all vCPUS have
		 * loaded a new root, i.e. the shadow pages being zapped cannot
		 * be in active use by the guest.
		 */
		if (batch >= BATCH_ZAP_PAGES &&
		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
			batch = 0;
			goto restart;
		}

		if (__kvm_mmu_prepare_zap_page(kvm, sp,
				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
			batch += nr_zapped;
			goto restart;
		}
	}

	/*
	 * Trigger a remote TLB flush before freeing the page tables to ensure
	 * KVM is not in the middle of a lockless shadow page table walk, which
	 * may reference the pages.
	 */
	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
}

/*
 * Fast invalidate all shadow pages and use lock-break technique
 * to zap obsolete pages.
 *
 * It is required when a memslot is being deleted or the VM is being
 * destroyed; in these cases, we must ensure that the KVM MMU does not
 * use any resource of the slot being deleted, or of any slot, after
 * this function returns.
 */
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	write_lock(&kvm->mmu_lock);
	trace_kvm_mmu_zap_all_fast(kvm);

	/*
	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
	 * held for the entire duration of zapping obsolete pages, it's
	 * impossible for there to be multiple invalid generations associated
	 * with *valid* shadow pages at any given time, i.e. there is exactly
	 * one valid generation and (at most) one invalid generation.
	 */
	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;

	/* In order to ensure all threads see this change when
	 * handling the MMU reload signal, this must happen in the
	 * same critical section as kvm_reload_remote_mmus, and
	 * before kvm_zap_obsolete_pages as kvm_zap_obsolete_pages
	 * could drop the MMU lock and yield.
	 */
	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_invalidate_all_roots(kvm);

	/*
	 * Notify all vcpus to reload their shadow page tables and flush TLBs.
	 * Then all vcpus will switch to the new shadow page table with the new
	 * mmu_valid_gen.
	 *
	 * Note: we need to do this under the protection of mmu_lock,
	 * otherwise, a vcpu could purge a shadow page but miss the tlb flush.
	 */
	kvm_reload_remote_mmus(kvm);

	kvm_zap_obsolete_pages(kvm);

	write_unlock(&kvm->mmu_lock);

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		kvm_tdp_mmu_zap_invalidated_roots(kvm);
		read_unlock(&kvm->mmu_lock);
	}
}

static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{
	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
{
	kvm_mmu_zap_all_fast(kvm);
}

void kvm_mmu_init_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);

	if (!kvm_mmu_init_tdp_mmu(kvm))
		/*
		 * No smp_load/store wrappers needed here as we are in
		 * VM init and there cannot be any memslots / other threads
		 * accessing this struct kvm yet.
		 */
		kvm->arch.memslots_have_rmaps = true;

	node->track_write = kvm_mmu_pte_write;
	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
	kvm_page_track_register_notifier(kvm, node);
}

void kvm_mmu_uninit_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	kvm_page_track_unregister_notifier(kvm, node);

	kvm_mmu_uninit_tdp_mmu(kvm);
}

/*
 * Invalidate (zap) SPTEs that cover GFNs from gfn_start up to, but not
 * including, gfn_end.
 */
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;
	bool flush = false;

	write_lock(&kvm->mmu_lock);

	kvm_inc_notifier_count(kvm, gfn_start, gfn_end);

	if (kvm_memslots_have_rmaps(kvm)) {
		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
			slots = __kvm_memslots(kvm, i);
			kvm_for_each_memslot(memslot, slots) {
				gfn_t start, end;

				start = max(gfn_start, memslot->base_gfn);
				end = min(gfn_end, memslot->base_gfn + memslot->npages);
				if (start >= end)
					continue;

				flush = slot_handle_level_range(kvm,
						(const struct kvm_memory_slot *) memslot,
						kvm_zap_rmapp, PG_LEVEL_4K,
						KVM_MAX_HUGEPAGE_LEVEL, start,
						end - 1, true, flush);
			}
		}
		if (flush)
			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
							   gfn_end - gfn_start);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
							  gfn_end, flush);
		if (flush)
			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
							   gfn_end - gfn_start);
	}

	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);

	kvm_dec_notifier_count(kvm, gfn_start, gfn_end);

	write_unlock(&kvm->mmu_lock);
}

static bool slot_rmap_write_protect(struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head,
				    const struct kvm_memory_slot *slot)
{
	return __rmap_write_protect(kvm, rmap_head, false);
}

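/*
 * Write-protect the shadow PTEs of @memslot at @start_level and above, e.g.
 * so that writes to the slot are trapped for dirty logging.
 */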
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      const struct kvm_memory_slot *memslot,
				      int start_level)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
					  start_level, KVM_MAX_HUGEPAGE_LEVEL,
					  false);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
		read_unlock(&kvm->mmu_lock);
	}

	/*
	 * We can flush all the TLBs out of the mmu lock without TLB
	 * corruption since we just change the spte from writable to
	 * readonly, so we only need to care about the case of changing
	 * the spte from present to present (changing the spte from present
	 * to nonpresent will flush all the TLBs immediately).  In other
	 * words, the only case we care about is mmu_spte_update(), where
	 * we have checked Host-writable | MMU-writable instead of
	 * PT_WRITABLE_MASK, meaning it no longer depends on
	 * PT_WRITABLE_MASK.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}

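/*
 * Zap the small-page SPTEs in the rmap that could be replaced by a larger
 * mapping (per kvm_mmu_max_mapping_level()), so that the next fault can map
 * the range with huge pages again.
 */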
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head,
					 const struct kvm_memory_slot *slot)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_tlb_flush = 0;
	kvm_pfn_t pfn;
	struct kvm_mmu_page *sp;

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		sp = sptep_to_sp(sptep);
		pfn = spte_to_pfn(*sptep);

		/*
		 * We cannot do huge page mapping for indirect shadow pages,
		 * which are found on the last rmap (level = 1) when not using
		 * tdp; such shadow pages are synced with the page table in
		 * the guest, and the guest page table is using 4K page size
		 * mapping if the indirect sp has level = 1.
		 */
		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
							       pfn, PG_LEVEL_NUM)) {
			pte_list_remove(kvm, rmap_head, sptep);

			if (kvm_available_flush_tlb_with_range())
				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
					KVM_PAGES_PER_HPAGE(sp->role.level));
			else
				need_tlb_flush = 1;

			goto restart;
		}
	}

	return need_tlb_flush;
}

void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *slot)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
		if (flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
		if (flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
		read_unlock(&kvm->mmu_lock);
	}
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	/*
	 * All current use cases for flushing the TLBs for a specific memslot
	 * are related to dirty logging, and many do the TLB flush out of
	 * mmu_lock.  The interaction between the various operations on the
	 * memslot must be serialized by slots_lock to ensure the TLB flush
	 * from one operation is observed by any other operation on the same
	 * memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
					   memslot->npages);
}

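/*
 * Clear the dirty status of all leaf SPTEs in @memslot, in both the
 * rmap-based shadow MMU and the TDP MMU, as part of dirty logging.
 */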
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
					 false);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
		read_unlock(&kvm->mmu_lock);
	}

	/*
	 * It's also safe to flush TLBs out of mmu lock here as currently this
	 * function is only used for dirty logging, in which case flushing TLB
	 * out of mmu lock also guarantees no dirty pages will be lost in
	 * dirty_bitmap.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int ign;

	write_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		if (WARN_ON(sp->role.invalid))
			continue;
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
			goto restart;
		if (cond_resched_rwlock_write(&kvm->mmu_lock))
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_zap_all(kvm);

	write_unlock(&kvm->mmu_lock);
}

void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{
	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);

	gen &= MMIO_SPTE_GEN_MASK;

	/*
	 * Generation numbers are incremented in multiples of the number of
	 * address spaces in order to provide unique generations across all
	 * address spaces.  Strip what is effectively the address space
	 * modifier prior to checking for a wrap of the MMIO generation so
	 * that a wrap in any address space is detected.
	 */
	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);

	/*
	 * The very rare case: if the MMIO generation number has wrapped,
	 * zap all shadow pages.
	 */
	if (unlikely(gen == 0)) {
		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
		kvm_mmu_zap_all_fast(kvm);
	}
}

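/*
 * Memory shrinker callback: pick one VM that has pages to reclaim, zap its
 * oldest MMU pages (or commit already-zapped obsolete pages), and report the
 * number of pages freed.
 */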
static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	int nr_to_scan = sc->nr_to_scan;
	unsigned long freed = 0;

	mutex_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		/*
		 * Never scan more than sc->nr_to_scan VM instances.
		 * Will not hit this condition practically since we do not try
		 * to shrink more than one VM and it is very unlikely to see
		 * !n_used_mmu_pages so many times.
		 */
		if (!nr_to_scan--)
			break;
		/*
		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
		 * here. We may skip a VM instance erroneously, but we do not
		 * want to shrink a VM that only started to populate its MMU
		 * anyway.
		 */
		if (!kvm->arch.n_used_mmu_pages &&
		    !kvm_has_zapped_obsolete_pages(kvm))
			continue;

		idx = srcu_read_lock(&kvm->srcu);
		write_lock(&kvm->mmu_lock);

		if (kvm_has_zapped_obsolete_pages(kvm)) {
			kvm_mmu_commit_zap_page(kvm,
			      &kvm->arch.zapped_obsolete_pages);
			goto unlock;
		}

		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);

unlock:
		write_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		/*
		 * unfair on small ones
		 * per-vm shrinkers cry out
		 * sadness comes quickly
		 */
		list_move_tail(&kvm->vm_list, &vm_list);
		break;
	}

	mutex_unlock(&kvm_lock);
	return freed;
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
	.count_objects = mmu_shrink_count,
	.scan_objects = mmu_shrink_scan,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
	kmem_cache_destroy(pte_list_desc_cache);
	kmem_cache_destroy(mmu_page_header_cache);
}

static bool get_nx_auto_mode(void)
{
	/* Return true when CPU has the bug, and mitigations are ON */
	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
}

static void __set_nx_huge_pages(bool val)
{
	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
}

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
{
	bool old_val = nx_huge_pages;
	bool new_val;

	/* In "auto" mode deploy workaround only if CPU has the bug. */
	if (sysfs_streq(val, "off"))
		new_val = 0;
	else if (sysfs_streq(val, "force"))
		new_val = 1;
	else if (sysfs_streq(val, "auto"))
		new_val = get_nx_auto_mode();
	else if (strtobool(val, &new_val) < 0)
		return -EINVAL;

	__set_nx_huge_pages(new_val);

	if (new_val != old_val) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list) {
			mutex_lock(&kvm->slots_lock);
			kvm_mmu_zap_all_fast(kvm);
			mutex_unlock(&kvm->slots_lock);

			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
		}
		mutex_unlock(&kvm_lock);
	}

	return 0;
}

int kvm_mmu_module_init(void)
{
	int ret = -ENOMEM;

	if (nx_huge_pages == -1)
		__set_nx_huge_pages(get_nx_auto_mode());

	/*
	 * MMU roles use union aliasing which is, generally speaking, an
	 * undefined behavior. However, we supposedly know how compilers behave
	 * and the current status quo is unlikely to change. Guardians below are
	 * supposed to let us know if the assumption becomes false.
	 */
	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));

	kvm_mmu_reset_all_pte_masks();

	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
					    sizeof(struct pte_list_desc),
					    0, SLAB_ACCOUNT, NULL);
	if (!pte_list_desc_cache)
		goto out;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, SLAB_ACCOUNT, NULL);
	if (!mmu_page_header_cache)
		goto out;

	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
		goto out;

	ret = register_shrinker(&mmu_shrinker);
	if (ret)
		goto out;

	return 0;

out:
	mmu_destroy_caches();
	return ret;
}

/*
 * Calculate mmu pages needed for kvm.
 */
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
{
	unsigned long nr_mmu_pages;
	unsigned long nr_pages = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);

		kvm_for_each_memslot(memslot, slots)
			nr_pages += memslot->npages;
	}

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	free_mmu_pages(&vcpu->arch.root_mmu);
	free_mmu_pages(&vcpu->arch.guest_mmu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
	mmu_audit_disable();
}

static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
{
	unsigned int old_val;
	int err;

	old_val = nx_huge_pages_recovery_ratio;
	err = param_set_uint(val, kp);
	if (err)
		return err;

	if (READ_ONCE(nx_huge_pages) &&
	    !old_val && nx_huge_pages_recovery_ratio) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list)
			wake_up_process(kvm->arch.nx_lpage_recovery_thread);

		mutex_unlock(&kvm_lock);
	}

	return err;
}

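/*
 * Zap a fraction (1 / nx_huge_pages_recovery_ratio) of the shadow pages on
 * the lpage_disallowed list, i.e. pages that were forced to small mappings
 * by the NX huge page workaround, so the affected ranges can eventually be
 * mapped with huge pages again.
 */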
static void kvm_recover_nx_lpages(struct kvm *kvm)
{
	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
	int rcu_idx;
	struct kvm_mmu_page *sp;
	unsigned int ratio;
	LIST_HEAD(invalid_list);
	bool flush = false;
	ulong to_zap;

	rcu_idx = srcu_read_lock(&kvm->srcu);
	write_lock(&kvm->mmu_lock);

	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
	for ( ; to_zap; --to_zap) {
		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
			break;

		/*
		 * We use a separate list instead of just using active_mmu_pages
		 * because the number of lpage_disallowed pages is expected to
		 * be relatively small compared to the total.
		 */
		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
				      struct kvm_mmu_page,
				      lpage_disallowed_link);
		WARN_ON_ONCE(!sp->lpage_disallowed);
		if (is_tdp_mmu_page(sp)) {
			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
		} else {
			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
			WARN_ON_ONCE(sp->lpage_disallowed);
		}

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
			cond_resched_rwlock_write(&kvm->mmu_lock);
			flush = false;
		}
	}
	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);

	write_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, rcu_idx);
}

static long get_nx_lpage_recovery_timeout(u64 start_time)
{
	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
		? start_time + 60 * HZ - get_jiffies_64()
		: MAX_SCHEDULE_TIMEOUT;
}

static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
{
	u64 start_time;
	long remaining_time;

	while (true) {
		start_time = get_jiffies_64();
		remaining_time = get_nx_lpage_recovery_timeout(start_time);

		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop() && remaining_time > 0) {
			schedule_timeout(remaining_time);
			remaining_time = get_nx_lpage_recovery_timeout(start_time);
			set_current_state(TASK_INTERRUPTIBLE);
		}

		set_current_state(TASK_RUNNING);

		if (kthread_should_stop())
			return 0;

		kvm_recover_nx_lpages(kvm);
	}
}

int kvm_mmu_post_init_vm(struct kvm *kvm)
{
	int err;

	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
					  "kvm-nx-lpage-recovery",
					  &kvm->arch.nx_lpage_recovery_thread);
	if (!err)
		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);

	return err;
}

void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.nx_lpage_recovery_thread)
		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
}