mmu.c 175.7 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
A
Avi Kivity 已提交
2 3 4 5 6 7 8 9 10
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
N
Nicolas Kaiser 已提交
11
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
A
Avi Kivity 已提交
12 13 14 15 16
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */
A
Avi Kivity 已提交
17

18
#include "irq.h"
19
#include "ioapic.h"
20
#include "mmu.h"
21
#include "mmu_internal.h"
22
#include "tdp_mmu.h"
23
#include "x86.h"
A
Avi Kivity 已提交
24
#include "kvm_cache_regs.h"
25
#include "kvm_emulate.h"
26
#include "cpuid.h"
27
#include "spte.h"
A
Avi Kivity 已提交
28

29
#include <linux/kvm_host.h>
A
Avi Kivity 已提交
30 31 32 33
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
34 35
#include <linux/moduleparam.h>
#include <linux/export.h>
36
#include <linux/swap.h>
M
Marcelo Tosatti 已提交
37
#include <linux/hugetlb.h>
38
#include <linux/compiler.h>
39
#include <linux/srcu.h>
40
#include <linux/slab.h>
41
#include <linux/sched/signal.h>
42
#include <linux/uaccess.h>
43
#include <linux/hash.h>
44
#include <linux/kern_levels.h>
45
#include <linux/kthread.h>
A
Avi Kivity 已提交
46

A
Avi Kivity 已提交
47
#include <asm/page.h>
48
#include <asm/memtype.h>
A
Avi Kivity 已提交
49
#include <asm/cmpxchg.h>
50
#include <asm/io.h>
51
#include <asm/set_memory.h>
52
#include <asm/vmx.h>
53
#include <asm/kvm_page_track.h>
54
#include "trace.h"
A
Avi Kivity 已提交
55

56 57
#include "paging.h"

P
Paolo Bonzini 已提交
58 59
extern bool itlb_multihit_kvm_mitigation;

60
int __read_mostly nx_huge_pages = -1;
61
static uint __read_mostly nx_huge_pages_recovery_period_ms;
62 63 64 65
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
66
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
67
#endif
P
Paolo Bonzini 已提交
68 69

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
70
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp);
P
Paolo Bonzini 已提交
71

72
static const struct kernel_param_ops nx_huge_pages_ops = {
P
Paolo Bonzini 已提交
73 74 75 76
	.set = set_nx_huge_pages,
	.get = param_get_bool,
};

77 78
static const struct kernel_param_ops nx_huge_pages_recovery_param_ops = {
	.set = set_nx_huge_pages_recovery_param,
79 80 81
	.get = param_get_uint,
};

P
Paolo Bonzini 已提交
82 83
module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
84
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops,
85 86
		&nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
87 88 89
module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops,
		&nx_huge_pages_recovery_period_ms, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint");
P
Paolo Bonzini 已提交
90

91 92 93
static bool __read_mostly force_flush_and_sync_on_reuse;
module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);

94 95 96 97 98 99 100
/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
101
bool tdp_enabled = false;
102

103
static int max_huge_page_level __read_mostly;
104
static int tdp_root_level __read_mostly;
105
static int max_tdp_level __read_mostly;
106

107
#ifdef MMU_DEBUG
108
bool dbg = 0;
109
module_param(dbg, bool, 0644);
110
#endif
A
Avi Kivity 已提交
111

112 113
#define PTE_PREFETCH_NUM		8

A
Avi Kivity 已提交
114 115 116
#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
M
Mike Day 已提交
117
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
A
Avi Kivity 已提交
118

119 120 121
#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))
A
Avi Kivity 已提交
122 123 124 125 126 127 128 129

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
130 131 132
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))
A
Avi Kivity 已提交
133

134 135
#include <trace/events/kvm.h>

136
/* make pte_list_desc fit well in cache lines */
137
#define PTE_LIST_EXT 14
138

139 140 141 142 143
/*
 * Slight optimization of cacheline layout, by putting `more' and `spte_count'
 * at the start; then accessing it will only use one single cacheline for
 * either full (entries==PTE_LIST_EXT) case or entries<=6.
 */
144 145
struct pte_list_desc {
	struct pte_list_desc *more;
146 147 148 149 150 151
	/*
	 * Stores number of entries stored in the pte_list_desc.  No need to be
	 * u64 but just for easier alignment.  When PTE_LIST_EXT, means full.
	 */
	u64 spte_count;
	u64 *sptes[PTE_LIST_EXT];
152 153
};

154 155 156 157
struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
158
	int level;
159 160 161
	unsigned index;
};

162 163 164 165 166 167 168
#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
					 (_root), (_addr));                \
	     shadow_walk_okay(&(_walker));			           \
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
169 170 171 172
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

173 174 175 176 177 178
#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))

179
static struct kmem_cache *pte_list_desc_cache;
180
struct kmem_cache *mmu_page_header_cache;
181
static struct percpu_counter kvm_total_used_mmu_pages;
182

183 184
static void mmu_spte_set(u64 *sptep, u64 spte);

185 186 187 188 189 190
struct kvm_mmu_role_regs {
	const unsigned long cr0;
	const unsigned long cr4;
	const u64 efer;
};

191 192 193
#define CREATE_TRACE_POINTS
#include "mmutrace.h"

194 195 196 197 198 199
/*
 * Yes, lot's of underscores.  They're a hint that you probably shouldn't be
 * reading from the role_regs.  Once the mmu_role is constructed, it becomes
 * the single source of truth for the MMU's state.
 */
#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
200 201
static inline bool __maybe_unused					\
____is_##reg##_##name(const struct kvm_mmu_role_regs *regs)		\
202 203 204 205 206 207 208 209 210 211 212 213 214 215
{									\
	return !!(regs->reg & flag);					\
}
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);

216 217 218 219 220 221 222
/*
 * The MMU itself (with a valid role) is the single source of truth for the
 * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
 * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
 * and the vCPU may be incorrect/irrelevant.
 */
#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
223
static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
224
{								\
225
	return !!(mmu->cpu_role. base_or_ext . reg##_##name);	\
226 227 228 229 230 231 232 233 234 235 236
}
BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);

237 238 239 240 241 242 243 244 245 246
static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_role_regs regs = {
		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
		.efer = vcpu->arch.efer,
	};

	return regs;
}
247

248
static int role_regs_to_root_level(const struct kvm_mmu_role_regs *regs)
249 250 251 252 253 254 255 256 257 258 259
{
	if (!____is_cr0_pg(regs))
		return 0;
	else if (____is_efer_lma(regs))
		return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
					       PT64_ROOT_4LEVEL;
	else if (____is_cr4_pae(regs))
		return PT32E_ROOT_LEVEL;
	else
		return PT32_ROOT_LEVEL;
}
260 261 262

static inline bool kvm_available_flush_tlb_with_range(void)
{
263
	return kvm_x86_ops.tlb_remote_flush_with_range;
264 265 266 267 268 269 270
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

271
	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
272
		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);
273 274 275 276 277

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

278
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
279 280 281 282 283 284 285 286 287 288
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;

	range.start_gfn = start_gfn;
	range.pages = pages;

	kvm_flush_remote_tlbs_with_range(kvm, &range);
}

289 290 291
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned int access)
{
292
	u64 spte = make_mmio_spte(vcpu, gfn, access);
293

294 295
	trace_mark_mmio_spte(sptep, gfn, spte);
	mmu_spte_set(sptep, spte);
296 297 298 299
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
300
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
301

302
	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
303 304 305
	       & shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
306 307 308 309
}

static unsigned get_mmio_spte_access(u64 spte)
{
310
	return spte & shadow_mmio_access_mask;
311 312
}

313
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
314
{
315
	u64 kvm_gen, spte_gen, gen;
316

317 318 319
	gen = kvm_vcpu_memslots(vcpu)->generation;
	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return false;
320

321
	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
322 323 324 325
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
326 327
}

A
Avi Kivity 已提交
328 329 330 331 332
static int is_cpuid_PSE36(void)
{
	return 1;
}

333 334 335 336 337 338 339
static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

340
#ifdef CONFIG_X86_64
A
Avi Kivity 已提交
341
static void __set_spte(u64 *sptep, u64 spte)
342
{
343
	WRITE_ONCE(*sptep, spte);
344 345
}

346
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
347
{
348
	WRITE_ONCE(*sptep, spte);
349 350 351 352 353 354
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}
355 356 357

static u64 __get_spte_lockless(u64 *sptep)
{
358
	return READ_ONCE(*sptep);
359
}
360
#else
361 362 363 364 365 366 367
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};
368

369 370
static void count_spte_clear(u64 *sptep, u64 spte)
{
371
	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
372 373 374 375 376 377 378 379 380

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

381 382 383
static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;
384

385 386 387 388 389 390 391 392 393 394 395 396
	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, We should store
	 * the high bits firstly, then set present bit, so cpu can not
	 * fetch this spte while we are setting the spte.
	 */
	smp_wmb();

397
	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
398 399
}

400 401 402 403 404 405 406
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

407
	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
408 409 410 411 412 413 414 415

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * present bit firstly to avoid vcpu fetch the old high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
416
	count_spte_clear(sptep, spte);
417 418 419 420 421 422 423 424 425 426 427
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
428 429
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
430
	count_spte_clear(sptep, spte);
431 432 433

	return orig.spte;
}
434 435 436

/*
 * The idea using the light way get the spte on x86_32 guest is from
437
 * gup_get_pte (mm/gup.c).
438 439 440 441 442 443 444 445 446 447 448 449 450 451
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running out of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
452 453 454
 */
static u64 __get_spte_lockless(u64 *sptep)
{
455
	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
475 476
#endif

477 478
static bool spte_has_volatile_bits(u64 spte)
{
479 480 481
	if (!is_shadow_present_pte(spte))
		return false;

482
	/*
483
	 * Always atomically update spte if it can be updated
484 485 486 487
	 * out of mmu-lock, it can ensure dirty bit is not lost,
	 * also, it can help us to get a stable is_writable_pte()
	 * to ensure tlb flush is not missed.
	 */
488 489
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
490 491
		return true;

492
	if (spte_ad_enabled(spte)) {
493 494 495 496
		if ((spte & shadow_accessed_mask) == 0 ||
	    	    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}
497

498
	return false;
499 500
}

501 502 503 504 505 506 507 508 509 510 511 512
/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

513 514 515
/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
516
 */
517
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
518
{
519
	u64 old_spte = *sptep;
520

521
	WARN_ON(!is_shadow_present_pte(new_spte));
522
	check_spte_writable_invariants(new_spte);
523

524 525
	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
526
		return old_spte;
527
	}
528

529
	if (!spte_has_volatile_bits(old_spte))
530
		__update_clear_spte_fast(sptep, new_spte);
531
	else
532
		old_spte = __update_clear_spte_slow(sptep, new_spte);
533

534 535
	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

536 537 538 539 540 541
	return old_spte;
}

/* Rules for using mmu_spte_update:
 * Update the state bits, it means the mapped pfn is not changed.
 *
542 543 544
 * Whenever an MMU-writable SPTE is overwritten with a read-only SPTE, remote
 * TLBs must be flushed. Otherwise rmap_write_protect will find a read-only
 * spte, even though the writable spte might be cached on a CPU's TLB.
545 546 547 548 549 550 551 552 553 554 555
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	bool flush = false;
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	if (!is_shadow_present_pte(old_spte))
		return false;

556 557
	/*
	 * For the spte updated out of mmu-lock is safe, since
558
	 * we always atomically update it, see the comments in
559 560
	 * spte_has_volatile_bits().
	 */
561
	if (spte_can_locklessly_be_made_writable(old_spte) &&
562
	      !is_writable_pte(new_spte))
563
		flush = true;
564

565
	/*
566
	 * Flush TLB when accessed/dirty states are changed in the page tables,
567 568 569
	 * to guarantee consistency between TLB and page tables.
	 */

570 571
	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
		flush = true;
572
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
573 574 575 576
	}

	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
		flush = true;
577
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
578
	}
579

580
	return flush;
581 582
}

583 584 585 586
/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent, and track the
 * state bits, it is used to clear the last level sptep.
587
 * Returns the old PTE.
588
 */
589
static int mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
590
{
D
Dan Williams 已提交
591
	kvm_pfn_t pfn;
592
	u64 old_spte = *sptep;
593
	int level = sptep_to_sp(sptep)->role.level;
594 595

	if (!spte_has_volatile_bits(old_spte))
596
		__update_clear_spte_fast(sptep, 0ull);
597
	else
598
		old_spte = __update_clear_spte_slow(sptep, 0ull);
599

600
	if (!is_shadow_present_pte(old_spte))
601
		return old_spte;
602

603 604
	kvm_update_page_stats(kvm, level, -1);

605
	pfn = spte_to_pfn(old_spte);
606 607 608 609 610 611

	/*
	 * KVM does not hold the refcount of the page used by
	 * kvm mmu, before reclaiming the page, we should
	 * unmap it from mmu first.
	 */
612
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
613

614
	if (is_accessed_spte(old_spte))
615
		kvm_set_pfn_accessed(pfn);
616 617

	if (is_dirty_spte(old_spte))
618
		kvm_set_pfn_dirty(pfn);
619

620
	return old_spte;
621 622 623 624 625 626 627 628 629
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear spte without caring the state bits of sptep,
 * it is used to set the upper level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
630
	__update_clear_spte_fast(sptep, 0ull);
631 632
}

633 634 635 636 637
static u64 mmu_spte_get_lockless(u64 *sptep)
{
	return __get_spte_lockless(sptep);
}

638 639 640 641 642 643 644 645
/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
	u64 spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(spte))
		return false;

646
	if (spte_ad_enabled(spte)) {
647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663
		clear_bit((ffs(shadow_accessed_mask) - 1),
			  (unsigned long *)sptep);
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(spte))
			kvm_set_pfn_dirty(spte_to_pfn(spte));

		spte = mark_spte_for_access_track(spte);
		mmu_spte_update_no_track(sptep, spte);
	}

	return true;
}

664 665
static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
666 667 668 669 670 671 672 673
	if (is_tdp_mmu(vcpu->arch.mmu)) {
		kvm_tdp_mmu_walk_lockless_begin();
	} else {
		/*
		 * Prevent page table teardown by making any free-er wait during
		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
		 */
		local_irq_disable();
674

675 676 677 678 679 680
		/*
		 * Make sure a following spte read is not reordered ahead of the write
		 * to vcpu->mode.
		 */
		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
	}
681 682 683 684
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
685 686 687 688 689 690 691 692 693 694 695
	if (is_tdp_mmu(vcpu->arch.mmu)) {
		kvm_tdp_mmu_walk_lockless_end();
	} else {
		/*
		 * Make sure the write to vcpu->mode is not reordered in front of
		 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
		 */
		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
		local_irq_enable();
	}
696 697
}

698
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
699
{
700 701
	int r;

702
	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
703 704
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
705
	if (r)
706
		return r;
707 708
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
				       PT64_ROOT_MAX_LEVEL);
709
	if (r)
710
		return r;
711
	if (maybe_indirect) {
712 713
		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
					       PT64_ROOT_MAX_LEVEL);
714 715 716
		if (r)
			return r;
	}
717 718
	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
					  PT64_ROOT_MAX_LEVEL);
719 720 721 722
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
723 724 725 726
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
727 728
}

729
static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
730
{
731
	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
732 733
}

734
static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
735
{
736
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
737 738
}

739 740 741 742 743 744 745 746 747 748
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
749
	if (!sp->role.direct) {
750
		sp->gfns[index] = gfn;
751 752 753 754 755 756 757 758
		return;
	}

	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
		pr_err_ratelimited("gfn mismatch under direct page %llx "
				   "(expected %llx, got %llx)\n",
				   sp->gfn,
				   kvm_mmu_page_get_gfn(sp, index), gfn);
759 760
}

M
Marcelo Tosatti 已提交
761
/*
762 763
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
M
Marcelo Tosatti 已提交
764
 */
765
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
766
		const struct kvm_memory_slot *slot, int level)
M
Marcelo Tosatti 已提交
767 768 769
{
	unsigned long idx;

770
	idx = gfn_to_index(gfn, slot->base_gfn, level);
771
	return &slot->arch.lpage_info[level - 2][idx];
M
Marcelo Tosatti 已提交
772 773
}

774
static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
775 776 777 778 779
					    gfn_t gfn, int count)
{
	struct kvm_lpage_info *linfo;
	int i;

780
	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
781 782 783 784 785 786
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->disallow_lpage += count;
		WARN_ON(linfo->disallow_lpage < 0);
	}
}

787
void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
788 789 790 791
{
	update_gfn_disallow_lpage_count(slot, gfn, 1);
}

792
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
793 794 795 796
{
	update_gfn_disallow_lpage_count(slot, gfn, -1);
}

797
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
M
Marcelo Tosatti 已提交
798
{
799
	struct kvm_memslots *slots;
800
	struct kvm_memory_slot *slot;
801
	gfn_t gfn;
M
Marcelo Tosatti 已提交
802

803
	kvm->arch.indirect_shadow_pages++;
804
	gfn = sp->gfn;
805 806
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
807 808

	/* the non-leaf shadow pages are keeping readonly. */
809
	if (sp->role.level > PG_LEVEL_4K)
810 811 812
		return kvm_slot_page_track_add_page(kvm, slot, gfn,
						    KVM_PAGE_TRACK_WRITE);

813
	kvm_mmu_gfn_disallow_lpage(slot, gfn);
M
Marcelo Tosatti 已提交
814 815
}

816
void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
P
Paolo Bonzini 已提交
817 818 819 820 821
{
	if (sp->lpage_disallowed)
		return;

	++kvm->stat.nx_lpage_splits;
822 823
	list_add_tail(&sp->lpage_disallowed_link,
		      &kvm->arch.lpage_disallowed_mmu_pages);
P
Paolo Bonzini 已提交
824 825 826
	sp->lpage_disallowed = true;
}

827
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
M
Marcelo Tosatti 已提交
828
{
829
	struct kvm_memslots *slots;
830
	struct kvm_memory_slot *slot;
831
	gfn_t gfn;
M
Marcelo Tosatti 已提交
832

833
	kvm->arch.indirect_shadow_pages--;
834
	gfn = sp->gfn;
835 836
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
837
	if (sp->role.level > PG_LEVEL_4K)
838 839 840
		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
						       KVM_PAGE_TRACK_WRITE);

841
	kvm_mmu_gfn_allow_lpage(slot, gfn);
M
Marcelo Tosatti 已提交
842 843
}

844
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
P
Paolo Bonzini 已提交
845 846 847
{
	--kvm->stat.nx_lpage_splits;
	sp->lpage_disallowed = false;
848
	list_del(&sp->lpage_disallowed_link);
P
Paolo Bonzini 已提交
849 850
}

851 852 853
static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
M
Marcelo Tosatti 已提交
854 855
{
	struct kvm_memory_slot *slot;
856

857
	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
858 859
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return NULL;
860
	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
861
		return NULL;
862 863 864 865

	return slot;
}

866
/*
867
 * About rmap_head encoding:
868
 *
869 870
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
871
 * pte_list_desc containing more mappings.
872 873 874 875
 */

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
876
 */
877
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
878
			struct kvm_rmap_head *rmap_head)
879
{
880
	struct pte_list_desc *desc;
881
	int count = 0;
882

883
	if (!rmap_head->val) {
884
		rmap_printk("%p %llx 0->1\n", spte, *spte);
885 886
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
887
		rmap_printk("%p %llx 1->many\n", spte, *spte);
888
		desc = mmu_alloc_pte_list_desc(vcpu);
889
		desc->sptes[0] = (u64 *)rmap_head->val;
A
Avi Kivity 已提交
890
		desc->sptes[1] = spte;
891
		desc->spte_count = 2;
892
		rmap_head->val = (unsigned long)desc | 1;
893
		++count;
894
	} else {
895
		rmap_printk("%p %llx many->many\n", spte, *spte);
896
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
897
		while (desc->spte_count == PTE_LIST_EXT) {
898
			count += PTE_LIST_EXT;
899 900 901
			if (!desc->more) {
				desc->more = mmu_alloc_pte_list_desc(vcpu);
				desc = desc->more;
902
				desc->spte_count = 0;
903 904
				break;
			}
905 906
			desc = desc->more;
		}
907 908
		count += desc->spte_count;
		desc->sptes[desc->spte_count++] = spte;
909
	}
910
	return count;
911 912
}

913
static void
914 915 916
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
			   struct pte_list_desc *desc, int i,
			   struct pte_list_desc *prev_desc)
917
{
918
	int j = desc->spte_count - 1;
919

A
Avi Kivity 已提交
920 921
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
922 923
	desc->spte_count--;
	if (desc->spte_count)
924 925
		return;
	if (!prev_desc && !desc->more)
926
		rmap_head->val = 0;
927 928 929 930
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
931
			rmap_head->val = (unsigned long)desc->more | 1;
932
	mmu_free_pte_list_desc(desc);
933 934
}

935
static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
936
{
937 938
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;
939 940
	int i;

941
	if (!rmap_head->val) {
942
		pr_err("%s: %p 0->BUG\n", __func__, spte);
943
		BUG();
944
	} else if (!(rmap_head->val & 1)) {
945
		rmap_printk("%p 1->0\n", spte);
946
		if ((u64 *)rmap_head->val != spte) {
947
			pr_err("%s:  %p 1->BUG\n", __func__, spte);
948 949
			BUG();
		}
950
		rmap_head->val = 0;
951
	} else {
952
		rmap_printk("%p many->many\n", spte);
953
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
954 955
		prev_desc = NULL;
		while (desc) {
956
			for (i = 0; i < desc->spte_count; ++i) {
A
Avi Kivity 已提交
957
				if (desc->sptes[i] == spte) {
958 959
					pte_list_desc_remove_entry(rmap_head,
							desc, i, prev_desc);
960 961
					return;
				}
962
			}
963 964 965
			prev_desc = desc;
			desc = desc->more;
		}
966
		pr_err("%s: %p many->many\n", __func__, spte);
967 968 969 970
		BUG();
	}
}

971 972
static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			    u64 *sptep)
973
{
974
	mmu_spte_clear_track_bits(kvm, sptep);
975 976 977
	__pte_list_remove(sptep, rmap_head);
}

P
Peter Xu 已提交
978
/* Return true if rmap existed, false otherwise */
979
static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
P
Peter Xu 已提交
980 981 982 983 984 985 986 987
{
	struct pte_list_desc *desc, *next;
	int i;

	if (!rmap_head->val)
		return false;

	if (!(rmap_head->val & 1)) {
988
		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
P
Peter Xu 已提交
989 990 991 992 993 994 995
		goto out;
	}

	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);

	for (; desc; desc = next) {
		for (i = 0; i < desc->spte_count; i++)
996
			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
P
Peter Xu 已提交
997 998 999 1000 1001 1002 1003 1004 1005
		next = desc->more;
		mmu_free_pte_list_desc(desc);
	}
out:
	/* rmap_head is meaningless now, remember to reset it */
	rmap_head->val = 0;
	return true;
}

1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	unsigned int count = 0;

	if (!rmap_head->val)
		return 0;
	else if (!(rmap_head->val & 1))
		return 1;

	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);

	while (desc) {
		count += desc->spte_count;
		desc = desc->more;
	}

	return count;
}

1026 1027
static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
					 const struct kvm_memory_slot *slot)
1028
{
1029
	unsigned long idx;
1030

1031
	idx = gfn_to_index(gfn, slot->base_gfn, level);
1032
	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1033 1034
}

1035 1036
static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
1037
	struct kvm_mmu_memory_cache *mc;
1038

1039
	mc = &vcpu->arch.mmu_pte_list_desc_cache;
1040
	return kvm_mmu_memory_cache_nr_free_objects(mc);
1041 1042
}

1043 1044
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
1045 1046
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
1047 1048
	struct kvm_mmu_page *sp;
	gfn_t gfn;
1049
	struct kvm_rmap_head *rmap_head;
1050

1051
	sp = sptep_to_sp(spte);
1052
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1053 1054

	/*
1055 1056 1057
	 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
	 * so we have to determine which memslots to use based on context
	 * information in sp->role.
1058 1059 1060 1061
	 */
	slots = kvm_memslots_for_spte_role(kvm, sp->role);

	slot = __gfn_to_memslot(slots, gfn);
1062
	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1063

1064
	__pte_list_remove(spte, rmap_head);
1065 1066
}

1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079
/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap.  All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
	/* private fields */
	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
	int pos;			/* index of the sptep */
};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
M
Miaohe Lin 已提交
1080
 * information in the iterator may not be valid.
1081 1082 1083
 *
 * Returns sptep if found, NULL otherwise.
 */
1084 1085
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
			   struct rmap_iterator *iter)
1086
{
1087 1088
	u64 *sptep;

1089
	if (!rmap_head->val)
1090 1091
		return NULL;

1092
	if (!(rmap_head->val & 1)) {
1093
		iter->desc = NULL;
1094 1095
		sptep = (u64 *)rmap_head->val;
		goto out;
1096 1097
	}

1098
	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1099
	iter->pos = 0;
1100 1101 1102 1103
	sptep = iter->desc->sptes[iter->pos];
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
1104 1105 1106 1107 1108 1109 1110 1111 1112
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
1113 1114
	u64 *sptep;

1115 1116 1117 1118 1119
	if (iter->desc) {
		if (iter->pos < PTE_LIST_EXT - 1) {
			++iter->pos;
			sptep = iter->desc->sptes[iter->pos];
			if (sptep)
1120
				goto out;
1121 1122 1123 1124 1125 1126 1127
		}

		iter->desc = iter->desc->more;

		if (iter->desc) {
			iter->pos = 0;
			/* desc->sptes[0] cannot be NULL */
1128 1129
			sptep = iter->desc->sptes[iter->pos];
			goto out;
1130 1131 1132 1133
		}
	}

	return NULL;
1134 1135 1136
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
1137 1138
}

1139 1140
#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
1141
	     _spte_; _spte_ = rmap_get_next(_iter_))
1142

1143
static void drop_spte(struct kvm *kvm, u64 *sptep)
1144
{
1145
	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1146 1147

	if (is_shadow_present_pte(old_spte))
1148
		rmap_remove(kvm, sptep);
A
Avi Kivity 已提交
1149 1150
}

1151 1152 1153 1154

static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
	if (is_large_pte(*sptep)) {
1155
		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
1156 1157 1158 1159 1160 1161 1162 1163 1164
		drop_spte(kvm, sptep);
		return true;
	}

	return false;
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
1165
	if (__drop_large_spte(vcpu->kvm, sptep)) {
1166
		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
1167 1168 1169 1170

		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
	}
1171 1172 1173
}

/*
1174
 * Write-protect on the specified @sptep, @pt_protect indicates whether
1175
 * spte write-protection is caused by protecting shadow page table.
1176
 *
T
Tiejun Chen 已提交
1177
 * Note: write protection is difference between dirty logging and spte
1178 1179 1180 1181 1182
 * protection:
 * - for dirty logging, the spte can be set to writable at anytime if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be writable only after unsync-ing
 *   shadow page.
1183
 *
1184
 * Return true if tlb need be flushed.
1185
 */
1186
static bool spte_write_protect(u64 *sptep, bool pt_protect)
1187 1188 1189
{
	u64 spte = *sptep;

1190
	if (!is_writable_pte(spte) &&
1191
	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1192 1193
		return false;

1194
	rmap_printk("spte %p %llx\n", sptep, *sptep);
1195

1196
	if (pt_protect)
1197
		spte &= ~shadow_mmu_writable_mask;
1198
	spte = spte & ~PT_WRITABLE_MASK;
1199

1200
	return mmu_spte_update(sptep, spte);
1201 1202
}

1203 1204
static bool rmap_write_protect(struct kvm_rmap_head *rmap_head,
			       bool pt_protect)
1205
{
1206 1207
	u64 *sptep;
	struct rmap_iterator iter;
1208
	bool flush = false;
1209

1210
	for_each_rmap_spte(rmap_head, &iter, sptep)
1211
		flush |= spte_write_protect(sptep, pt_protect);
1212

1213
	return flush;
1214 1215
}

1216
static bool spte_clear_dirty(u64 *sptep)
1217 1218 1219
{
	u64 spte = *sptep;

1220
	rmap_printk("spte %p %llx\n", sptep, *sptep);
1221

1222
	MMU_WARN_ON(!spte_ad_enabled(spte));
1223 1224 1225 1226
	spte &= ~shadow_dirty_mask;
	return mmu_spte_update(sptep, spte);
}

1227
static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1228 1229 1230
{
	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
					       (unsigned long *)sptep);
1231
	if (was_writable && !spte_ad_enabled(*sptep))
1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242
		kvm_set_pfn_dirty(spte_to_pfn(*sptep));

	return was_writable;
}

/*
 * Gets the GFN ready for another round of dirty logging by clearing the
 *	- D bit on ad-enabled SPTEs, and
 *	- W bit on ad-disabled SPTEs.
 * Returns true iff any D or W bits were cleared.
 */
1243
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1244
			       const struct kvm_memory_slot *slot)
1245 1246 1247 1248 1249
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

1250
	for_each_rmap_spte(rmap_head, &iter, sptep)
1251 1252
		if (spte_ad_need_write_protect(*sptep))
			flush |= spte_wrprot_for_clear_dirty(sptep);
1253
		else
1254
			flush |= spte_clear_dirty(sptep);
1255 1256 1257 1258

	return flush;
}

1259
/**
1260
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1261 1262 1263 1264 1265
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
1266
 * Used when we do not need to care about huge page mappings.
1267
 */
1268
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1269 1270
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
1271
{
1272
	struct kvm_rmap_head *rmap_head;
1273

1274
	if (is_tdp_mmu_enabled(kvm))
1275 1276
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
				slot->base_gfn + gfn_offset, mask, true);
1277 1278 1279 1280

	if (!kvm_memslots_have_rmaps(kvm))
		return;

1281
	while (mask) {
1282 1283
		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					PG_LEVEL_4K, slot);
1284
		rmap_write_protect(rmap_head, false);
M
Marcelo Tosatti 已提交
1285

1286 1287 1288
		/* clear the first set bit */
		mask &= mask - 1;
	}
1289 1290
}

1291
/**
1292 1293
 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
 * protect the page if the D-bit isn't supported.
1294 1295 1296 1297 1298 1299 1300
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should clear D-bit
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
1301 1302 1303
static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
					 struct kvm_memory_slot *slot,
					 gfn_t gfn_offset, unsigned long mask)
1304
{
1305
	struct kvm_rmap_head *rmap_head;
1306

1307
	if (is_tdp_mmu_enabled(kvm))
1308 1309
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
				slot->base_gfn + gfn_offset, mask, false);
1310 1311 1312 1313

	if (!kvm_memslots_have_rmaps(kvm))
		return;

1314
	while (mask) {
1315 1316
		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					PG_LEVEL_4K, slot);
1317
		__rmap_clear_dirty(kvm, rmap_head, slot);
1318 1319 1320 1321 1322 1323

		/* clear the first set bit */
		mask &= mask - 1;
	}
}

1324 1325 1326 1327 1328 1329 1330
/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
1331 1332
 * We need to care about huge page mappings: e.g. during dirty logging we may
 * have such mappings.
1333 1334 1335 1336 1337
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350
	/*
	 * Huge pages are NOT write protected when we start dirty logging in
	 * initially-all-set mode; must write protect them here so that they
	 * are split to 4K on the first write.
	 *
	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
	 * of memslot has no such restriction, so the range can cross two large
	 * pages.
	 */
	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);

1351 1352 1353
		if (READ_ONCE(eager_page_split))
			kvm_mmu_try_split_huge_pages(kvm, slot, start, end, PG_LEVEL_4K);

1354 1355 1356 1357 1358 1359 1360 1361 1362 1363
		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);

		/* Cross two large pages? */
		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
						       PG_LEVEL_2M);
	}

	/* Now handle 4K PTEs.  */
1364 1365
	if (kvm_x86_ops.cpu_dirty_log_size)
		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1366 1367
	else
		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1368 1369
}

1370 1371
int kvm_cpu_dirty_log_size(void)
{
1372
	return kvm_x86_ops.cpu_dirty_log_size;
1373 1374
}

1375
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1376 1377
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level)
1378
{
1379
	struct kvm_rmap_head *rmap_head;
1380
	int i;
1381
	bool write_protected = false;
1382

1383 1384
	if (kvm_memslots_have_rmaps(kvm)) {
		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
1385
			rmap_head = gfn_to_rmap(gfn, i, slot);
1386
			write_protected |= rmap_write_protect(rmap_head, true);
1387
		}
1388 1389
	}

1390
	if (is_tdp_mmu_enabled(kvm))
1391
		write_protected |=
1392
			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1393

1394
	return write_protected;
1395 1396
}

1397
static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
1398 1399 1400 1401
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1402
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1403 1404
}

1405
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1406
			  const struct kvm_memory_slot *slot)
1407
{
1408
	return pte_list_destroy(kvm, rmap_head);
1409 1410
}

1411 1412 1413
static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			    struct kvm_memory_slot *slot, gfn_t gfn, int level,
			    pte_t unused)
1414
{
1415
	return kvm_zap_rmapp(kvm, rmap_head, slot);
1416 1417
}

1418 1419 1420
static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			      struct kvm_memory_slot *slot, gfn_t gfn, int level,
			      pte_t pte)
1421
{
1422 1423
	u64 *sptep;
	struct rmap_iterator iter;
1424
	bool need_flush = false;
1425
	u64 new_spte;
D
Dan Williams 已提交
1426
	kvm_pfn_t new_pfn;
1427

1428 1429
	WARN_ON(pte_huge(pte));
	new_pfn = pte_pfn(pte);
1430

1431
restart:
1432
	for_each_rmap_spte(rmap_head, &iter, sptep) {
1433
		rmap_printk("spte %p %llx gfn %llx (%d)\n",
1434
			    sptep, *sptep, gfn, level);
1435

1436
		need_flush = true;
1437

1438
		if (pte_write(pte)) {
1439
			pte_list_remove(kvm, rmap_head, sptep);
1440
			goto restart;
1441
		} else {
1442 1443
			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
					*sptep, new_pfn);
1444

1445
			mmu_spte_clear_track_bits(kvm, sptep);
1446
			mmu_spte_set(sptep, new_spte);
1447 1448
		}
	}
1449

1450 1451
	if (need_flush && kvm_available_flush_tlb_with_range()) {
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1452
		return false;
1453 1454
	}

1455
	return need_flush;
1456 1457
}

1458 1459
struct slot_rmap_walk_iterator {
	/* input fields. */
1460
	const struct kvm_memory_slot *slot;
1461 1462 1463 1464 1465 1466 1467
	gfn_t start_gfn;
	gfn_t end_gfn;
	int start_level;
	int end_level;

	/* output fields. */
	gfn_t gfn;
1468
	struct kvm_rmap_head *rmap;
1469 1470 1471
	int level;

	/* private field. */
1472
	struct kvm_rmap_head *end_rmap;
1473 1474 1475 1476 1477 1478 1479
};

static void
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{
	iterator->level = level;
	iterator->gfn = iterator->start_gfn;
1480 1481
	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
1482 1483 1484 1485
}

static void
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1486
		    const struct kvm_memory_slot *slot, int start_level,
1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524
		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
{
	iterator->slot = slot;
	iterator->start_level = start_level;
	iterator->end_level = end_level;
	iterator->start_gfn = start_gfn;
	iterator->end_gfn = end_gfn;

	rmap_walk_init_level(iterator, iterator->start_level);
}

static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{
	return !!iterator->rmap;
}

static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
	if (++iterator->rmap <= iterator->end_rmap) {
		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
		return;
	}

	if (++iterator->level > iterator->end_level) {
		iterator->rmap = NULL;
		return;
	}

	rmap_walk_init_level(iterator, iterator->level);
}

#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
	   _start_gfn, _end_gfn, _iter_)				\
	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
				 _end_level_, _start_gfn, _end_gfn);	\
	     slot_rmap_walk_okay(_iter_);				\
	     slot_rmap_walk_next(_iter_))

1525 1526 1527
typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			       struct kvm_memory_slot *slot, gfn_t gfn,
			       int level, pte_t pte);
1528

1529 1530 1531
static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
						 struct kvm_gfn_range *range,
						 rmap_handler_t handler)
1532
{
1533
	struct slot_rmap_walk_iterator iterator;
1534
	bool ret = false;
1535

1536 1537 1538 1539
	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
				 range->start, range->end - 1, &iterator)
		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
			       iterator.level, range->pte);
1540

1541
	return ret;
1542 1543
}

1544
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1545
{
1546
	bool flush = false;
1547

1548 1549
	if (kvm_memslots_have_rmaps(kvm))
		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
1550

1551
	if (is_tdp_mmu_enabled(kvm))
1552
		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
1553

1554
	return flush;
1555 1556
}

1557
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1558
{
1559
	bool flush = false;
1560

1561 1562
	if (kvm_memslots_have_rmaps(kvm))
		flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
1563

1564
	if (is_tdp_mmu_enabled(kvm))
1565
		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
1566

1567
	return flush;
1568 1569
}

1570 1571 1572
static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			  struct kvm_memory_slot *slot, gfn_t gfn, int level,
			  pte_t unused)
1573
{
1574
	u64 *sptep;
1575
	struct rmap_iterator iter;
1576 1577
	int young = 0;

1578 1579
	for_each_rmap_spte(rmap_head, &iter, sptep)
		young |= mmu_spte_age(sptep);
1580

1581 1582 1583
	return young;
}

1584 1585 1586
static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			       struct kvm_memory_slot *slot, gfn_t gfn,
			       int level, pte_t unused)
A
Andrea Arcangeli 已提交
1587
{
1588 1589
	u64 *sptep;
	struct rmap_iterator iter;
A
Andrea Arcangeli 已提交
1590

1591 1592
	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (is_accessed_spte(*sptep))
1593 1594
			return true;
	return false;
A
Andrea Arcangeli 已提交
1595 1596
}

1597 1598
#define RMAP_RECYCLE_THRESHOLD 1000

1599 1600
static void rmap_add(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
		     u64 *spte, gfn_t gfn)
1601
{
1602
	struct kvm_mmu_page *sp;
1603 1604
	struct kvm_rmap_head *rmap_head;
	int rmap_count;
1605

1606
	sp = sptep_to_sp(spte);
1607
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1608
	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1609
	rmap_count = pte_list_add(vcpu, spte, rmap_head);
1610

1611 1612 1613 1614 1615
	if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
		kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
		kvm_flush_remote_tlbs_with_address(
				vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
	}
1616 1617
}

1618
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1619
{
1620
	bool young = false;
1621

1622 1623
	if (kvm_memslots_have_rmaps(kvm))
		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
1624

1625
	if (is_tdp_mmu_enabled(kvm))
1626
		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
1627 1628

	return young;
1629 1630
}

1631
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
A
Andrea Arcangeli 已提交
1632
{
1633
	bool young = false;
1634

1635 1636
	if (kvm_memslots_have_rmaps(kvm))
		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
1637

1638
	if (is_tdp_mmu_enabled(kvm))
1639
		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
1640 1641

	return young;
A
Andrea Arcangeli 已提交
1642 1643
}

1644
#ifdef MMU_DEBUG
1645
static int is_empty_shadow_page(u64 *spt)
A
Avi Kivity 已提交
1646
{
1647 1648 1649
	u64 *pos;
	u64 *end;

1650
	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1651
		if (is_shadow_present_pte(*pos)) {
1652
			printk(KERN_ERR "%s: %p %llx\n", __func__,
1653
			       pos, *pos);
A
Avi Kivity 已提交
1654
			return 0;
1655
		}
A
Avi Kivity 已提交
1656 1657
	return 1;
}
1658
#endif
A
Avi Kivity 已提交
1659

1660 1661 1662 1663 1664 1665
/*
 * This value is the sum of all of the kvm instances's
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster
 */
1666
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
1667 1668 1669 1670 1671
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

1672
static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1673
{
1674
	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1675
	hlist_del(&sp->hash_link);
1676 1677
	list_del(&sp->link);
	free_page((unsigned long)sp->spt);
1678 1679
	if (!sp->role.direct)
		free_page((unsigned long)sp->gfns);
1680
	kmem_cache_free(mmu_page_header_cache, sp);
1681 1682
}

1683 1684
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
1685
	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1686 1687
}

1688
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1689
				    struct kvm_mmu_page *sp, u64 *parent_pte)
1690 1691 1692 1693
{
	if (!parent_pte)
		return;

1694
	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1695 1696
}

1697
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1698 1699
				       u64 *parent_pte)
{
1700
	__pte_list_remove(parent_pte, &sp->parent_ptes);
1701 1702
}

1703 1704 1705 1706
static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
1707
	mmu_spte_clear_no_track(parent_pte);
1708 1709
}

1710
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
M
Marcelo Tosatti 已提交
1711
{
1712
	struct kvm_mmu_page *sp;
1713

1714 1715
	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1716
	if (!direct)
1717
		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1718
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1719 1720 1721 1722 1723 1724

	/*
	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
	 * depends on valid pages being added to the head of the list.  See
	 * comments in kvm_zap_obsolete_pages().
	 */
1725
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1726 1727 1728
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
M
Marcelo Tosatti 已提交
1729 1730
}

1731
static void mark_unsync(u64 *spte);
1732
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1733
{
1734 1735 1736 1737 1738 1739
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
		mark_unsync(sptep);
	}
1740 1741
}

1742
static void mark_unsync(u64 *spte)
1743
{
1744
	struct kvm_mmu_page *sp;
1745
	unsigned int index;
1746

1747
	sp = sptep_to_sp(spte);
1748 1749
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1750
		return;
1751
	if (sp->unsync_children++)
1752
		return;
1753
	kvm_mmu_mark_parents_unsync(sp);
1754 1755
}

1756
static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1757
			       struct kvm_mmu_page *sp)
1758
{
1759
	return -1;
1760 1761
}

1762 1763 1764 1765 1766 1767 1768 1769 1770 1771
#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

1772 1773
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
1774
{
1775
	int i;
1776

1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787
	if (sp->unsync)
		for (i=0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

1788 1789 1790 1791 1792 1793 1794
static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
	--sp->unsync_children;
	WARN_ON((int)sp->unsync_children < 0);
	__clear_bit(idx, sp->unsync_child_bitmap);
}

1795 1796 1797 1798
static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;
1799

1800
	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1801
		struct kvm_mmu_page *child;
1802 1803
		u64 ent = sp->spt[i];

1804 1805 1806 1807
		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
			clear_unsync_child_bit(sp, i);
			continue;
		}
1808

1809
		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1810 1811 1812 1813 1814 1815

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
1816 1817 1818 1819
			if (!ret) {
				clear_unsync_child_bit(sp, i);
				continue;
			} else if (ret > 0) {
1820
				nr_unsync_leaf += ret;
1821
			} else
1822 1823 1824 1825 1826 1827
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
1828
			clear_unsync_child_bit(sp, i);
1829 1830
	}

1831 1832 1833
	return nr_unsync_leaf;
}

#define INVALID_INDEX (-1)

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	pvec->nr = 0;
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, INVALID_INDEX);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

#define for_each_valid_sp(_kvm, _sp, _list)				\
	hlist_for_each_entry(_sp, _list, hash_link)			\
		if (is_obsolete_sp((_kvm), (_sp))) {			\
		} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_valid_sp(_kvm, _sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{
	int ret = vcpu->arch.mmu->sync_page(vcpu, sp);

	if (ret < 0)
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
	return ret;
}

static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
					struct list_head *invalid_list,
					bool remote_flush)
{
	if (!remote_flush && list_empty(invalid_list))
		return false;

	if (!list_empty(invalid_list))
		kvm_mmu_commit_zap_page(kvm, invalid_list);
	else
		kvm_flush_remote_tlbs(kvm);
	return true;
}

static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	if (sp->role.invalid)
		return true;

	/* TDP MMU pages do not use the MMU generation. */
	return !sp->tdp_mmu_page &&
	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
	unsigned int idx[PT64_ROOT_MAX_LEVEL];
};

#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_first(&pvec, &parents);	\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;
		unsigned idx = pvec->page[n].idx;
		int level = sp->role.level;

		parents->idx[level-1] = idx;
		if (level == PG_LEVEL_4K)
			break;

		parents->parent[level-2] = sp;
	}

	return n;
}

static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	int level;

	if (pvec->nr == 0)
		return 0;

	WARN_ON(pvec->page[0].idx != INVALID_INDEX);

	sp = pvec->page[0].sp;
	level = sp->role.level;
	WARN_ON(level == PG_LEVEL_4K);

	parents->parent[level-2] = sp;

	/* Also set up a sentinel.  Further entries in pvec are all
	 * children of sp, so this element is never overwritten.
	 */
	parents->parent[level-1] = NULL;
	return mmu_pages_next(pvec, parents, 0);
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];
		sp = parents->parent[level];
		if (!sp)
			return;

		WARN_ON(idx == INVALID_INDEX);
		clear_unsync_child_bit(sp, idx);
		level++;
	} while (!sp->unsync_children);
}

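/*
 * Sync all unsync shadow pages reachable from @parent: write-protect their
 * gfns, sync their contents with the guest page tables and flush TLBs as
 * needed.  If @can_yield is false and a reschedule is needed, KVM_REQ_MMU_SYNC
 * is requested and -EINTR is returned.
 */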
static int mmu_sync_children(struct kvm_vcpu *vcpu,
			     struct kvm_mmu_page *parent, bool can_yield)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);
	bool flush = false;

	while (mmu_unsync_walk(parent, &pages)) {
		bool protected = false;

		for_each_sp(pages, sp, parents, i)
			protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);

		if (protected) {
			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
			flush = false;
		}

		for_each_sp(pages, sp, parents, i) {
			kvm_unlink_unsync_page(vcpu->kvm, sp);
			flush |= kvm_sync_page(vcpu, sp, &invalid_list) > 0;
			mmu_pages_clear_parents(&parents);
		}
		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
			if (!can_yield) {
				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
				return -EINTR;
			}

			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
			flush = false;
		}
	}

	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
	return 0;
}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	atomic_set(&sp->write_flooding_count, 0);
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	__clear_sp_write_flooding_count(sptep_to_sp(spte));
}

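/*
 * Look up a shadow page for @gfn with the current MMU role in the page hash,
 * syncing or zapping stale unsync pages along the way; allocate and track a
 * new shadow page if none is found.
 */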
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int direct,
					     unsigned int access)
{
	bool direct_mmu = vcpu->arch.mmu->direct_map;
	union kvm_mmu_page_role role;
	struct hlist_head *sp_list;
	unsigned quadrant;
	struct kvm_mmu_page *sp;
	int ret;
	int collisions = 0;
	LIST_HEAD(invalid_list);

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = direct;
	role.access = access;
	if (role.has_4_byte_gpte) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}

	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
		}

		if (sp->role.word != role.word) {
			/*
			 * If the guest is creating an upper-level page, zap
			 * unsync pages for the same gfn.  While it's possible
			 * the guest is using recursive page tables, in all
			 * likelihood the guest has stopped using the unsync
			 * page and is installing a completely unrelated page.
			 * Unsync pages must not be left as is, because the new
			 * upper-level page will be write-protected.
			 */
			if (level > PG_LEVEL_4K && sp->unsync)
				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							 &invalid_list);
			continue;
		}

		if (direct_mmu)
			goto trace_get_page;

		if (sp->unsync) {
			/*
			 * The page is good, but is stale.  kvm_sync_page does
			 * get the latest guest state, but (unlike mmu_unsync_children)
			 * it doesn't write-protect the page or mark it synchronized!
			 * This way the validity of the mapping is ensured, but the
			 * overhead of write protection is not incurred until the
			 * guest invalidates the TLB mapping.  This allows multiple
			 * SPs for a single gfn to be unsync.
			 *
			 * If the sync fails, the page is zapped.  If so, break
			 * in order to rebuild it.
			 */
			ret = kvm_sync_page(vcpu, sp, &invalid_list);
			if (ret < 0)
				break;

			WARN_ON(!list_empty(&invalid_list));
			if (ret > 0)
				kvm_flush_remote_tlbs(vcpu->kvm);
		}

		__clear_sp_write_flooding_count(sp);

trace_get_page:
		trace_kvm_mmu_get_page(sp, false);
		goto out;
	}

	++vcpu->kvm->stat.mmu_cache_miss;

	sp = kvm_mmu_alloc_page(vcpu, direct);

	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, sp_list);
	if (!direct) {
		account_shadowed(vcpu->kvm, sp);
		if (level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
	}
	trace_kvm_mmu_get_page(sp, true);
out:
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
	return sp;
}

static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = root;
	iterator->level = vcpu->arch.mmu->shadow_root_level;

	if (iterator->level >= PT64_ROOT_4LEVEL &&
	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
	    !vcpu->arch.mmu->direct_map)
		iterator->level = PT32E_ROOT_LEVEL;

	if (iterator->level == PT32E_ROOT_LEVEL) {
		/*
		 * prev_root is currently only used for 64-bit hosts. So only
		 * the active root_hpa is valid here.
		 */
		BUG_ON(root != vcpu->arch.mmu->root.hpa);

		iterator->shadow_addr
			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
				    addr);
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PG_LEVEL_4K)
		return false;

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
{
	if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
		iterator->level = 0;
		return;
	}

	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	__shadow_walk_next(iterator, *iterator->sptep);
}

static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
{
	u64 spte;

	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);

	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));

	mmu_spte_set(sptep, spte);

	mmu_page_add_parent_pte(vcpu, sp, sptep);

	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);
}

static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

		drop_parent_pte(child, sptep);
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
	}
}

/* Returns the number of zapped non-leaf child shadow pages. */
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			    u64 *spte, struct list_head *invalid_list)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level)) {
			drop_spte(kvm, spte);
		} else {
			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, spte);

			/*
			 * Recursively zap nested TDP SPs, parentless SPs are
			 * unlikely to be used again in the near future.  This
			 * avoids retaining a large number of stale nested SPs.
			 */
			if (tdp_enabled && invalid_list &&
			    child->role.guest_mode && !child->parent_ptes.val)
				return kvm_mmu_prepare_zap_page(kvm, child,
								invalid_list);
		}
	} else if (is_mmio_spte(pte)) {
		mmu_spte_clear_no_track(spte);
	}
	return 0;
}

static int kvm_mmu_page_unlink_children(struct kvm *kvm,
					struct kvm_mmu_page *sp,
					struct list_head *invalid_list)
{
	int zapped = 0;
	unsigned i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);

	return zapped;
}

static void kvm_mmu_unlink_parents(struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
		drop_parent_pte(sp, sptep);
}

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
{
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	if (parent->role.level == PG_LEVEL_4K)
		return 0;

	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
			mmu_pages_clear_parents(&parents);
			zapped++;
		}
	}

	return zapped;
}

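/*
 * Unlink @sp from the MMU and queue it on @invalid_list for freeing once all
 * TLBs have been flushed.  Returns true if zapping children made the
 * active_mmu_pages list unstable; *nr_zapped reports how many pages were
 * zapped in total.
 */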
static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
				       struct kvm_mmu_page *sp,
				       struct list_head *invalid_list,
				       int *nr_zapped)
{
	bool list_unstable, zapped_root = false;

	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
	kvm_mmu_unlink_parents(sp);

	/* Zapping children means active_mmu_pages has become unstable. */
	list_unstable = *nr_zapped;

	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp);

	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		/* Count self */
		(*nr_zapped)++;

		/*
		 * Already invalid pages (previously active roots) are not on
		 * the active page list.  See list_del() in the "else" case of
		 * !sp->root_count.
		 */
		if (sp->role.invalid)
			list_add(&sp->link, invalid_list);
		else
			list_move(&sp->link, invalid_list);
		kvm_mod_used_mmu_pages(kvm, -1);
	} else {
		/*
		 * Remove the active root from the active page list, the root
		 * will be explicitly freed when the root_count hits zero.
		 */
		list_del(&sp->link);

		/*
		 * Obsolete pages cannot be used on any vCPUs, see the comment
		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
		 * treats invalid shadow pages as being obsolete.
		 */
		zapped_root = !is_obsolete_sp(kvm, sp);
	}

	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	sp->role.invalid = 1;

	/*
	 * Make the request to free obsolete roots after marking the root
	 * invalid, otherwise other vCPUs may not see it as invalid.
	 */
	if (zapped_root)
		kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);
	return list_unstable;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list)
{
	int nr_zapped;

	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
	return nr_zapped;
}

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp, *nsp;

	if (list_empty(invalid_list))
		return;

	/*
	 * We need to make sure everyone sees our modifications to
	 * the page tables and see changes to vcpu->mode here. The barrier
	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
	 *
	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
	 * guest mode and/or lockless shadow page table walks.
	 */
	kvm_flush_remote_tlbs(kvm);

	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_free_page(sp);
	}
}

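/*
 * Zap up to @nr_to_zap shadow pages, walking active_mmu_pages from the tail
 * (oldest entries) and skipping active roots.  Returns the number of pages
 * actually zapped.
 */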
static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
						  unsigned long nr_to_zap)
{
	unsigned long total_zapped = 0;
	struct kvm_mmu_page *sp, *tmp;
	LIST_HEAD(invalid_list);
	bool unstable;
	int nr_zapped;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return 0;

restart:
	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
		/*
		 * Don't zap active root pages, the page itself can't be freed
		 * and zapping it will just force vCPUs to realloc and reload.
		 */
		if (sp->root_count)
			continue;

		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
						      &nr_zapped);
		total_zapped += nr_zapped;
		if (total_zapped >= nr_to_zap)
			break;

		if (unstable)
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	kvm->stat.mmu_recycled += total_zapped;
	return total_zapped;
}

static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);

	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
		return 0;

	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);

	/*
	 * Note, this check is intentionally soft, it only guarantees that one
	 * page is available, while the caller may end up allocating as many as
	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
	 * exceeding the (arbitrary by default) limit will not harm the host,
	 * being too aggressive may unnecessarily kill the guest, and getting an
	 * exact count is far more trouble than it's worth, especially in the
	 * page fault paths.
	 */
	if (!kvm_mmu_available_pages(vcpu->kvm))
		return -ENOSPC;
	return 0;
}

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
	write_lock(&kvm->mmu_lock);

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
						  goal_nr_mmu_pages);

		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;

	write_unlock(&kvm->mmu_lock);
}

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	int r;

	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
	r = 0;
	write_lock(&kvm->mmu_lock);
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	write_unlock(&kvm->mmu_lock);

	return r;
}

static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (vcpu->arch.mmu->direct_map)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return r;
}

static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	trace_kvm_mmu_unsync_page(sp);
	++kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

/*
 * Attempt to unsync any shadow pages that can be reached by the specified gfn,
 * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
 * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
 * be write-protected.
 */
int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool prefetch)
{
	struct kvm_mmu_page *sp;
	bool locked = false;

	/*
	 * Force write-protection if the page is being tracked.  Note, the page
	 * track machinery is used to write-protect upper-level shadow pages,
	 * i.e. this guards the role.level == 4K assertion below!
	 */
	if (kvm_slot_page_track_is_active(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE))
		return -EPERM;

	/*
	 * The page is not write-tracked, mark existing shadow pages unsync
	 * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
	 * that case, KVM must complete emulation of the guest TLB flush before
	 * allowing shadow pages to become unsync (writable by the guest).
	 */
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
		if (!can_unsync)
			return -EPERM;

		if (sp->unsync)
			continue;

		if (prefetch)
			return -EEXIST;

		/*
		 * TDP MMU page faults require an additional spinlock as they
		 * run with mmu_lock held for read, not write, and the unsync
		 * logic is not thread safe.  Take the spinlock regardless of
		 * the MMU type to avoid extra conditionals/parameters, there's
		 * no meaningful penalty if mmu_lock is held for write.
		 */
		if (!locked) {
			locked = true;
			spin_lock(&kvm->arch.mmu_unsync_pages_lock);

			/*
			 * Recheck after taking the spinlock, a different vCPU
			 * may have since marked the page unsync.  A false
			 * positive on the unprotected check above is not
			 * possible as clearing sp->unsync _must_ hold mmu_lock
			 * for write, i.e. unsync cannot transition from 0->1
			 * while this CPU holds mmu_lock for read (or write).
			 */
			if (READ_ONCE(sp->unsync))
				continue;
		}

		WARN_ON(sp->role.level != PG_LEVEL_4K);
		kvm_unsync_page(kvm, sp);
	}
	if (locked)
		spin_unlock(&kvm->arch.mmu_unsync_pages_lock);

	/*
	 * We need to ensure that the marking of unsync pages is visible
	 * before the SPTE is updated to allow writes because
	 * kvm_mmu_sync_roots() checks the unsync flags without holding
	 * the MMU lock and so can race with this. If the SPTE was updated
	 * before the page had been marked as unsync-ed, something like the
	 * following could happen:
	 *
	 * CPU 1                    CPU 2
	 * ---------------------------------------------------------------------
	 * 1.2 Host updates SPTE
	 *     to be writable
	 *                      2.1 Guest writes a GPTE for GVA X.
	 *                          (GPTE being in the guest page table shadowed
	 *                           by the SP from CPU 1.)
	 *                          This reads SPTE during the page table walk.
	 *                          Since SPTE.W is read as 1, there is no
	 *                          fault.
	 *
	 *                      2.2 Guest issues TLB flush.
	 *                          That causes a VM Exit.
	 *
	 *                      2.3 Walking of unsync pages sees sp->unsync is
	 *                          false and skips the page.
	 *
	 *                      2.4 Guest accesses GVA X.
	 *                          Since the mapping in the SP was not updated,
	 *                          the old mapping for GVA X incorrectly
	 *                          gets used.
	 * 1.1 Host marks SP
	 *     as unsync
	 *     (sp->unsync = true)
	 *
	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
	 * the situation in 2.4 does not arise.  It pairs with the read barrier
	 * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3.
	 */
	smp_wmb();

	return 0;
}

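/*
 * Install a leaf SPTE for @gfn, handling overwrites of existing mappings and
 * MMIO pfns.  Returns a RET_PF_* value indicating how the fault was resolved.
 */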
static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
			u64 *sptep, unsigned int pte_access, gfn_t gfn,
			kvm_pfn_t pfn, struct kvm_page_fault *fault)
{
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
	int level = sp->role.level;
	int was_rmapped = 0;
	int ret = RET_PF_FIXED;
	bool flush = false;
	bool wrprot;
	u64 spte;

	/* Prefetching always gets a writable pfn.  */
	bool host_writable = !fault || fault->map_writable;
	bool prefetch = !fault || fault->prefetch;
	bool write_fault = fault && fault->write;

	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);

	if (unlikely(is_noslot_pfn(pfn))) {
		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
		return RET_PF_EMULATE;
	}

	if (is_shadow_present_pte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;
			u64 pte = *sptep;

			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, sptep);
			flush = true;
		} else if (pfn != spte_to_pfn(*sptep)) {
			pgprintk("hfn old %llx new %llx\n",
				 spte_to_pfn(*sptep), pfn);
			drop_spte(vcpu->kvm, sptep);
			flush = true;
		} else
			was_rmapped = 1;
	}

	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
			   true, host_writable, &spte);

	if (*sptep == spte) {
		ret = RET_PF_SPURIOUS;
	} else {
		flush |= mmu_spte_update(sptep, spte);
		trace_kvm_mmu_set_spte(level, gfn, sptep);
	}

	if (wrprot) {
		if (write_fault)
			ret = RET_PF_EMULATE;
	}

	if (flush)
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
				KVM_PAGES_PER_HPAGE(level));

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);

	if (!was_rmapped) {
		WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
		kvm_update_page_stats(vcpu->kvm, level, 1);
		rmap_add(vcpu, slot, sptep, gfn);
	}

	return ret;
}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
	struct kvm_memory_slot *slot;
	unsigned int access = sp->role.access;
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
	if (!slot)
		return -1;

	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
	if (ret <= 0)
		return -1;

	for (i = 0; i < ret; i++, gfn++, start++) {
		mmu_set_spte(vcpu, slot, start, access, gfn,
			     page_to_pfn(pages[i]), NULL);
		put_page(pages[i]);
	}

	return 0;
}

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte) || spte == sptep) {
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				return;
			start = NULL;
		} else if (!start)
			start = spte;
	}
	if (start)
		direct_pte_prefetch_many(vcpu, sp, start, spte);
}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

	sp = sptep_to_sp(sptep);

	/*
	 * Without accessed bits, there's no way to distinguish between
	 * actually accessed translations and prefetched, so disable pte
	 * prefetch if accessed bits aren't available.
	 */
	if (sp_ad_disabled(sp))
		return;

	if (sp->role.level > PG_LEVEL_4K)
		return;

	/*
	 * If addresses are being invalidated, skip prefetching to avoid
	 * accidentally prefetching those addresses.
	 */
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}

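/*
 * Walk the host page tables for the hva backing @gfn to find the level at
 * which the pfn is mapped in the host (4K, 2M or 1G).  Runs with IRQs
 * disabled to prevent the host page tables from being torn down mid-walk.
 */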
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
				  const struct kvm_memory_slot *slot)
{
	unsigned long hva;
	unsigned long flags;
	int level = PG_LEVEL_4K;
	pgd_t pgd;
	p4d_t p4d;
	pud_t pud;
	pmd_t pmd;

	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
		return PG_LEVEL_4K;

	/*
	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
	 * is not solely for performance, it's also necessary to avoid the
	 * "writable" check in __gfn_to_hva_many(), which will always fail on
	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
	 * page fault steps have already verified the guest isn't writing a
	 * read-only memslot.
	 */
	hva = __gfn_to_hva_memslot(slot, gfn);

	/*
	 * Lookup the mapping level in the current mm.  The information
	 * may become stale soon, but it is safe to use as long as
	 * 1) mmu_notifier_retry was checked after taking mmu_lock, and
	 * 2) mmu_lock is taken now.
	 *
	 * We still need to disable IRQs to prevent concurrent tear down
	 * of page tables.
	 */
	local_irq_save(flags);

	pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
	if (pgd_none(pgd))
		goto out;

	p4d = READ_ONCE(*p4d_offset(&pgd, hva));
	if (p4d_none(p4d) || !p4d_present(p4d))
		goto out;

	pud = READ_ONCE(*pud_offset(&p4d, hva));
	if (pud_none(pud) || !pud_present(pud))
		goto out;

	if (pud_large(pud)) {
		level = PG_LEVEL_1G;
		goto out;
	}

	pmd = READ_ONCE(*pmd_offset(&pud, hva));
	if (pmd_none(pmd) || !pmd_present(pmd))
		goto out;

	if (pmd_large(pmd))
		level = PG_LEVEL_2M;

out:
	local_irq_restore(flags);
	return level;
}

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      kvm_pfn_t pfn, int max_level)
{
	struct kvm_lpage_info *linfo;
	int host_level;

	max_level = min(max_level, max_huge_page_level);
	for ( ; max_level > PG_LEVEL_4K; max_level--) {
		linfo = lpage_info_slot(gfn, slot, max_level);
		if (!linfo->disallow_lpage)
			break;
	}

	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
	return min(host_level, max_level);
}

void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_memory_slot *slot = fault->slot;
	kvm_pfn_t mask;

	fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;

	if (unlikely(fault->max_level == PG_LEVEL_4K))
		return;

	if (is_error_noslot_pfn(fault->pfn) || kvm_is_reserved_pfn(fault->pfn))
		return;

	if (kvm_slot_dirty_track_enabled(slot))
		return;

	/*
	 * Enforce the iTLB multihit workaround after capturing the requested
	 * level, which will be used to do precise, accurate accounting.
	 */
	fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
						     fault->gfn, fault->pfn,
						     fault->max_level);
	if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
		return;

	/*
	 * mmu_notifier_retry() was successful and mmu_lock is held, so
	 * the pmd can't be split from under us.
	 */
	fault->goal_level = fault->req_level;
	mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
	VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
	fault->pfn &= ~mask;
}

void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
{
	if (cur_level > PG_LEVEL_4K &&
	    cur_level == fault->goal_level &&
	    is_shadow_present_pte(spte) &&
	    !is_large_pte(spte)) {
		/*
		 * A small SPTE exists for this pfn, but FNAME(fetch)
		 * and __direct_map would like to create a large PTE
		 * instead: just force them to go down another level,
		 * patching back for them into pfn the next 9 bits of
		 * the address.
		 */
		u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
				KVM_PAGES_PER_HPAGE(cur_level - 1);
		fault->pfn |= fault->gfn & page_mask;
		fault->goal_level--;
	}
}

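/*
 * Map the faulting GPA by walking the shadow page table down to the goal
 * level, allocating intermediate shadow pages as needed, and installing the
 * final leaf SPTE via mmu_set_spte().
 */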
static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_shadow_walk_iterator it;
	struct kvm_mmu_page *sp;
	int ret;
	gfn_t base_gfn = fault->gfn;

	kvm_mmu_hugepage_adjust(vcpu, fault);

	trace_kvm_mmu_spte_requested(fault);
	for_each_shadow_entry(vcpu, fault->addr, it) {
		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, *it.sptep, it.level);

		base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == fault->goal_level)
			break;

		drop_large_spte(vcpu, it.sptep);
		if (is_shadow_present_pte(*it.sptep))
			continue;

		sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
				      it.level - 1, true, ACC_ALL);

		link_shadow_page(vcpu, it.sptep, sp);
		if (fault->is_tdp && fault->huge_page_disallowed &&
		    fault->req_level >= it.level)
			account_huge_nx_page(vcpu->kvm, sp);
	}

	if (WARN_ON_ONCE(it.level != fault->goal_level))
		return -EFAULT;

	ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
			   base_gfn, fault->pfn, fault);
	if (ret == RET_PF_SPURIOUS)
		return ret;

	direct_pte_prefetch(vcpu, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;
}

static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
}

static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
{
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte, otherwise a read access on the readonly gfn can
	 * also cause an mmio page fault and be treated as mmio access.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
		return RET_PF_EMULATE;

	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
		return RET_PF_RETRY;
	}

	return -EFAULT;
}

static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
				unsigned int access, int *ret_val)
{
	/* The pfn is invalid, report the error! */
	if (unlikely(is_error_pfn(fault->pfn))) {
		*ret_val = kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
		return true;
	}

	if (unlikely(!fault->slot)) {
		gva_t gva = fault->is_tdp ? 0 : fault->addr;

		vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
				     access & shadow_mmio_access_mask);
		/*
		 * If MMIO caching is disabled, emulate immediately without
		 * touching the shadow page tables as attempting to install an
		 * MMIO SPTE will just be an expensive nop.  Do not cache MMIO
		 * whose gfn is greater than host.MAXPHYADDR, any guest that
		 * generates such gfns is running nested and is being tricked
		 * by L0 userspace (you can observe gfn > L1.MAXPHYADDR if
		 * and only if L1's MAXPHYADDR is inaccurate with respect to
		 * the hardware's).
		 */
		if (unlikely(!enable_mmio_caching) ||
		    unlikely(fault->gfn > kvm_mmu_max_gfn())) {
			*ret_val = RET_PF_EMULATE;
			return true;
		}
	}

	return false;
}

static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
{
	/*
	 * Do not fix the mmio spte with invalid generation number which
	 * need to be updated by slow page fault path.
	 */
	if (fault->rsvd)
		return false;

	/* See if the page fault is due to an NX violation */
	if (unlikely(fault->exec && fault->present))
		return false;

	/*
	 * #PF can be fast if:
	 * 1. The shadow page table entry is not present, which could mean that
	 *    the fault is potentially caused by access tracking (if enabled).
	 * 2. The shadow page table entry is present and the fault
	 *    is caused by write-protect, that means we just need change the W
	 *    bit of the spte which can be done out of mmu-lock.
	 *
	 * However, if access tracking is disabled we know that a non-present
	 * page must be a genuine page fault where we have to create a new SPTE.
	 * So, if access tracking is disabled, we return true only for write
	 * accesses to a present page.
	 */

	return shadow_acc_track_mask != 0 || (fault->write && fault->present);
}

/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
static bool
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
			u64 *sptep, u64 old_spte, u64 new_spte)
{
	/*
	 * Theoretically we could also set dirty bit (and flush TLB) here in
	 * order to eliminate unnecessary PML logging. See comments in
	 * set_spte. But fast_page_fault is very unlikely to happen with PML
	 * enabled, so we do not do this. This might result in the same GPA
	 * to be logged in PML buffer again when the write really happens, and
	 * eventually to be called by mark_page_dirty twice. But it's also no
	 * harm. This also avoids the TLB flush needed after setting dirty bit
	 * so non-PML cases won't be impacted.
	 *
	 * Compare with set_spte where instead shadow_dirty_mask is set.
	 */
	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
		return false;

	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
		mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);

	return true;
}

static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
{
	if (fault->exec)
		return is_executable_pte(spte);

	if (fault->write)
		return is_writable_pte(spte);

	/* Fault was on Read access */
	return spte & PT_PRESENT_MASK;
}

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between walk_shadow_page_lockless_{begin,end}.
 *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
 */
static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 old_spte;
	u64 *sptep = NULL;

	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
		sptep = iterator.sptep;
		*spte = old_spte;
	}

	return sptep;
}

/*
 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
 */
static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_mmu_page *sp;
	int ret = RET_PF_INVALID;
	u64 spte = 0ull;
	u64 *sptep = NULL;
	uint retry_count = 0;

	if (!page_fault_can_be_fast(fault))
		return ret;

	walk_shadow_page_lockless_begin(vcpu);

	do {
		u64 new_spte;

		if (is_tdp_mmu(vcpu->arch.mmu))
			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
		else
			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);

		if (!is_shadow_present_pte(spte))
			break;

		sp = sptep_to_sp(sptep);
		if (!is_last_spte(spte, sp->role.level))
			break;

		/*
		 * Check whether the memory access that caused the fault would
		 * still cause it if it were to be performed right now. If not,
		 * then this is a spurious fault caused by a lazily flushed TLB,
		 * or because some other CPU has already fixed the PTE after the
		 * current CPU took the fault.
		 *
		 * Need not check the access of upper level table entries since
		 * they are always ACC_ALL.
		 */
		if (is_access_allowed(fault, spte)) {
			ret = RET_PF_SPURIOUS;
			break;
		}

		new_spte = spte;

		if (is_access_track_spte(spte))
			new_spte = restore_acc_track_spte(new_spte);

		/*
		 * Currently, to simplify the code, write-protection can
		 * be removed in the fast path only if the SPTE was
		 * write-protected for dirty-logging or access tracking.
		 */
		if (fault->write &&
		    spte_can_locklessly_be_made_writable(spte)) {
			new_spte |= PT_WRITABLE_MASK;

			/*
			 * Do not fix write-permission on the large spte when
			 * dirty logging is enabled. Since we only dirty the
			 * first page into the dirty-bitmap in
			 * fast_pf_fix_direct_spte(), other pages are missed
			 * if its slot has dirty logging enabled.
			 *
			 * Instead, we let the slow page fault path create a
			 * normal spte to fix the access.
			 */
			if (sp->role.level > PG_LEVEL_4K &&
			    kvm_slot_dirty_track_enabled(fault->slot))
				break;
		}

		/* Verify that the fault can be handled in the fast path */
		if (new_spte == spte ||
		    !is_access_allowed(fault, new_spte))
			break;

		/*
		 * Currently, fast page fault only works for direct mapping
		 * since the gfn is not stable for indirect shadow page. See
		 * Documentation/virt/kvm/locking.rst to get more detail.
		 */
		if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
			ret = RET_PF_FIXED;
			break;
		}

		if (++retry_count > 4) {
			printk_once(KERN_WARNING
				"kvm: Fast #PF retrying more than 4 times.\n");
			break;
		}

	} while (true);

	trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
	walk_shadow_page_lockless_end(vcpu);

	return ret;
}

static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(*root_hpa))
		return;

	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
	if (WARN_ON(!sp))
		return;

	if (is_tdp_mmu_page(sp))
		kvm_tdp_mmu_put_root(kvm, sp, false);
	else if (!--sp->root_count && sp->role.invalid)
		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

	*root_hpa = INVALID_PAGE;
}

/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
			ulong roots_to_free)
{
	int i;
	LIST_HEAD(invalid_list);
	bool free_active_root;

	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);

	/* Before acquiring the MMU lock, see if we need to do any real work. */
	free_active_root = (roots_to_free & KVM_MMU_ROOT_CURRENT)
		&& VALID_PAGE(mmu->root.hpa);

	if (!free_active_root) {
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
			    VALID_PAGE(mmu->prev_roots[i].hpa))
				break;

		if (i == KVM_MMU_NUM_PREV_ROOTS)
			return;
	}

	write_lock(&kvm->mmu_lock);

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
					   &invalid_list);

	if (free_active_root) {
		if (to_shadow_page(mmu->root.hpa)) {
			mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
		} else if (mmu->pae_root) {
			for (i = 0; i < 4; ++i) {
				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
					continue;

				mmu_free_root_page(kvm, &mmu->pae_root[i],
						   &invalid_list);
				mmu->pae_root[i] = INVALID_PAE_ROOT;
			}
		}
		mmu->root.hpa = INVALID_PAGE;
		mmu->root.pgd = 0;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	write_unlock(&kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);

void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
{
	unsigned long roots_to_free = 0;
	hpa_t root_hpa;
	int i;

	/*
	 * This should not be called while L2 is active, L2 can't invalidate
	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
	 */
	WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		root_hpa = mmu->prev_roots[i].hpa;
		if (!VALID_PAGE(root_hpa))
			continue;

		if (!to_shadow_page(root_hpa) ||
			to_shadow_page(root_hpa)->role.guest_mode)
			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
	}

	kvm_mmu_free_roots(kvm, mmu, roots_to_free);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);


static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		ret = 1;
	}

	return ret;
}

static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
			    u8 level, bool direct)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
	++sp->root_count;

	return __pa(sp->spt);
}

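/*
 * Allocate the root page(s) for a direct MMU: either a single TDP MMU root,
 * a single 4/5-level shadow root, or four PAE roots, depending on the
 * configured shadow root level.
 */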
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	u8 shadow_root_level = mmu->shadow_root_level;
	hpa_t root;
	unsigned i;
	int r;

	write_lock(&vcpu->kvm->mmu_lock);
	r = make_mmu_pages_available(vcpu);
	if (r < 0)
		goto out_unlock;

	if (is_tdp_mmu_enabled(vcpu->kvm)) {
		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
		mmu->root.hpa = root;
	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
		mmu->root.hpa = root;
	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
		if (WARN_ON_ONCE(!mmu->pae_root)) {
			r = -EIO;
			goto out_unlock;
		}

		for (i = 0; i < 4; ++i) {
			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));

			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
					      i << 30, PT32_ROOT_LEVEL, true);
			mmu->pae_root[i] = root | PT_PRESENT_MASK |
					   shadow_me_mask;
		}
		mmu->root.hpa = __pa(mmu->pae_root);
	} else {
		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
		r = -EIO;
		goto out_unlock;
	}

	/* root.pgd is ignored for direct MMUs. */
	mmu->root.pgd = 0;
out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
	return r;
}

static int mmu_first_shadow_root_alloc(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	int r = 0, i, bkt;

	/*
	 * Check if this is the first shadow root being allocated before
	 * taking the lock.
	 */
	if (kvm_shadow_root_allocated(kvm))
		return 0;

	mutex_lock(&kvm->slots_arch_lock);

	/* Recheck, under the lock, whether this is the first shadow root. */
	if (kvm_shadow_root_allocated(kvm))
		goto out_unlock;

	/*
	 * Check if anything actually needs to be allocated, e.g. all metadata
	 * will be allocated upfront if TDP is disabled.
	 */
	if (kvm_memslots_have_rmaps(kvm) &&
	    kvm_page_track_write_tracking_enabled(kvm))
		goto out_success;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(slot, bkt, slots) {
			/*
			 * Both of these functions are no-ops if the target is
			 * already allocated, so unconditionally calling both
			 * is safe.  Intentionally do NOT free allocations on
			 * failure to avoid having to track which allocations
			 * were made now versus when the memslot was created.
			 * The metadata is guaranteed to be freed when the slot
			 * is freed, and will be kept/used if userspace retries
			 * KVM_RUN instead of killing the VM.
			 */
			r = memslot_rmap_alloc(slot, slot->npages);
			if (r)
				goto out_unlock;
			r = kvm_page_track_write_tracking_alloc(slot);
			if (r)
				goto out_unlock;
		}
	}

	/*
	 * Ensure that shadow_root_allocated becomes true strictly after
	 * all the related pointers are set.
	 */
out_success:
	smp_store_release(&kvm->arch.shadow_root_allocated, true);

out_unlock:
	mutex_unlock(&kvm->slots_arch_lock);
	return r;
}

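/*
 * Allocate shadow roots for a non-direct MMU: a single root for 4/5-level
 * guest paging, or PAE roots (plus PML4/PML5 wrappers as needed) when the
 * guest uses 32-bit or PAE paging.
 */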
static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	u64 pdptrs[4], pm_mask;
	gfn_t root_gfn, root_pgd;
	hpa_t root;
	unsigned i;
	int r;

	root_pgd = mmu->get_guest_pgd(vcpu);
	root_gfn = root_pgd >> PAGE_SHIFT;

	if (mmu_check_root(vcpu, root_gfn))
		return 1;

	/*
	 * On SVM, reading PDPTRs might access guest memory, which might fault
	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
	 */
	if (mmu->root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			pdptrs[i] = mmu->get_pdptr(vcpu, i);
			if (!(pdptrs[i] & PT_PRESENT_MASK))
				continue;

			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
				return 1;
		}
	}

	r = mmu_first_shadow_root_alloc(vcpu->kvm);
	if (r)
		return r;

	write_lock(&vcpu->kvm->mmu_lock);
	r = make_mmu_pages_available(vcpu);
	if (r < 0)
		goto out_unlock;

	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
		root = mmu_alloc_root(vcpu, root_gfn, 0,
				      mmu->shadow_root_level, false);
		mmu->root.hpa = root;
		goto set_root_pgd;
	}

	if (WARN_ON_ONCE(!mmu->pae_root)) {
		r = -EIO;
		goto out_unlock;
	}

	/*
	 * We shadow a 32 bit page table. This may be a legacy 2-level
	 * or a PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
	 */
	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

		if (WARN_ON_ONCE(!mmu->pml4_root)) {
			r = -EIO;
			goto out_unlock;
		}
		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;

		if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
			if (WARN_ON_ONCE(!mmu->pml5_root)) {
				r = -EIO;
				goto out_unlock;
			}
			mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
		}
	}

	for (i = 0; i < 4; ++i) {
		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));

		if (mmu->root_level == PT32E_ROOT_LEVEL) {
			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
				mmu->pae_root[i] = INVALID_PAE_ROOT;
				continue;
			}
			root_gfn = pdptrs[i] >> PAGE_SHIFT;
		}

		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, false);
		mmu->pae_root[i] = root | pm_mask;
	}

	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
		mmu->root.hpa = __pa(mmu->pml5_root);
	else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
		mmu->root.hpa = __pa(mmu->pml4_root);
	else
		mmu->root.hpa = __pa(mmu->pae_root);

set_root_pgd:
	mmu->root.pgd = root_pgd;
out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);

	return r;
}

static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	bool need_pml5 = mmu->shadow_root_level > PT64_ROOT_4LEVEL;
	u64 *pml5_root = NULL;
	u64 *pml4_root = NULL;
	u64 *pae_root;

	/*
	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
	 * tables are allocated and initialized at root creation as there is no
	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
	 */
	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
		return 0;

	/*
	 * NPT, the only paging mode that uses this horror, uses a fixed number
	 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
	 * all MMUs are 5-level.  Thus, this can safely require that pml5_root
	 * is allocated if the other roots are valid and pml5 is needed, as any
	 * prior MMU would also have required pml5.
	 */
	if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
		return 0;

	/*
	 * The special roots should always be allocated in concert.  Yell and
	 * bail if KVM ends up in a state where only one of the roots is valid.
	 */
	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
			 (need_pml5 && mmu->pml5_root)))
		return -EIO;

	/*
	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
	 * doesn't need to be decrypted.
	 */
	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!pae_root)
		return -ENOMEM;

#ifdef CONFIG_X86_64
	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!pml4_root)
		goto err_pml4;

	if (need_pml5) {
		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!pml5_root)
			goto err_pml5;
	}
#endif

	mmu->pae_root = pae_root;
	mmu->pml4_root = pml4_root;
	mmu->pml5_root = pml5_root;

	return 0;

#ifdef CONFIG_X86_64
err_pml5:
	free_page((unsigned long)pml4_root);
err_pml4:
	free_page((unsigned long)pae_root);
	return -ENOMEM;
#endif
}

static bool is_unsync_root(hpa_t root)
{
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(root))
		return false;

	/*
	 * The read barrier orders the CPU's read of SPTE.W during the page table
	 * walk before the reads of sp->unsync/sp->unsync_children here.
	 *
	 * Even if another CPU was marking the SP as unsync-ed simultaneously,
	 * any guest page table changes are not guaranteed to be visible anyway
	 * until this VCPU issues a TLB flush strictly after those changes are
	 * made.  We only need to ensure that the other CPU sets these flags
	 * before any actual changes to the page tables are made.  The comments
	 * in mmu_try_to_unsync_pages() describe what could go wrong if this
	 * requirement isn't satisfied.
	 */
	smp_rmb();
	sp = to_shadow_page(root);

	/*
	 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
	 * PDPTEs for a given PAE root need to be synchronized individually.
	 */
	if (WARN_ON_ONCE(!sp))
		return false;

	if (sp->unsync || sp->unsync_children)
		return true;

	return false;
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (vcpu->arch.mmu->direct_map)
		return;

	if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
		return;

	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);

	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		hpa_t root = vcpu->arch.mmu->root.hpa;
		sp = to_shadow_page(root);

		if (!is_unsync_root(root))
			return;

		write_lock(&vcpu->kvm->mmu_lock);
		mmu_sync_children(vcpu, sp, true);
		write_unlock(&vcpu->kvm->mmu_lock);
		return;
	}

	write_lock(&vcpu->kvm->mmu_lock);

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu->pae_root[i];

		if (IS_VALID_PAE_ROOT(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = to_shadow_page(root);
			mmu_sync_children(vcpu, sp, true);
		}
	}

	write_unlock(&vcpu->kvm->mmu_lock);
}

void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
{
	unsigned long roots_to_free = 0;
	int i;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);

	/* sync prev_roots by simply freeing them */
	kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  gpa_t vaddr, u64 access,
				  struct x86_exception *exception)
{
	if (exception)
		exception->error_code = 0;
	return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
}

static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	/*
	 * A nested guest cannot use the MMIO cache if it is using nested
	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
	 */
	if (mmu_is_nested(vcpu))
		return false;

	if (direct)
		return vcpu_match_mmio_gpa(vcpu, addr);

	return vcpu_match_mmio_gva(vcpu, addr);
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between walk_shadow_page_lockless_{begin,end}.
 */
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
{
	struct kvm_shadow_walk_iterator iterator;
	int leaf = -1;
	u64 spte;

	for (shadow_walk_init(&iterator, vcpu, addr),
	     *root_level = iterator.level;
	     shadow_walk_okay(&iterator);
	     __shadow_walk_next(&iterator, spte)) {
		leaf = iterator.level;
		spte = mmu_spte_get_lockless(iterator.sptep);

		sptes[leaf] = spte;
	}

	return leaf;
}
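
/*
 * Illustrative example (not derived from any particular guest): for a
 * 4-level walk that terminates at a 2MiB mapping, get_walk() returns 2 and
 * sets *root_level = 4; sptes[4], sptes[3] and sptes[2] hold the SPTEs that
 * were visited, while sptes[1] is left untouched.  Callers should therefore
 * only trust entries in the [leaf, root_level] range.
 */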

/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	struct rsvd_bits_validate *rsvd_check;
	int root, leaf, level;
	bool reserved = false;

	walk_shadow_page_lockless_begin(vcpu);

	if (is_tdp_mmu(vcpu->arch.mmu))
		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
	else
		leaf = get_walk(vcpu, addr, sptes, &root);

	walk_shadow_page_lockless_end(vcpu);

	if (unlikely(leaf < 0)) {
		*sptep = 0ull;
		return reserved;
	}

	*sptep = sptes[leaf];

	/*
	 * Skip reserved bits checks on the terminal leaf if it's not a valid
	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
	 * design, always have reserved bits set.  The purpose of the checks is
	 * to detect reserved bits on non-MMIO SPTEs, i.e. buggy SPTEs.
	 */
	if (!is_shadow_present_pte(sptes[leaf]))
		leaf++;

	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;

	for (level = root; level >= leaf; level--)
		reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);

	if (reserved) {
		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
		       __func__, addr);
		for (level = root; level >= leaf; level--)
			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
			       sptes[level], level,
			       get_rsvd_bits(rsvd_check, sptes[level], level));
	}

	return reserved;
}

static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	u64 spte;
	bool reserved;

	if (mmio_info_in_cache(vcpu, addr, direct))
		return RET_PF_EMULATE;

	reserved = get_mmio_spte(vcpu, addr, &spte);
	if (WARN_ON(reserved))
		return -EINVAL;

	if (is_mmio_spte(spte)) {
		gfn_t gfn = get_mmio_spte_gfn(spte);
		unsigned int access = get_mmio_spte_access(spte);

		if (!check_mmio_spte(vcpu, spte))
			return RET_PF_INVALID;

		if (direct)
			addr = 0;

		trace_handle_mmio_page_fault(addr, gfn, access);
		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
		return RET_PF_EMULATE;
	}

	/*
	 * If the page table is zapped by other CPUs, let the CPU fault again
	 * on the address.
	 */
	return RET_PF_RETRY;
}

static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
					 struct kvm_page_fault *fault)
{
	if (unlikely(fault->rsvd))
		return false;

	if (!fault->present || !fault->write)
		return false;

	/*
	 * The guest is writing a page that is write-tracked; this cannot be
	 * fixed by the page fault handler.
	 */
	if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE))
		return true;

	return false;
}

static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
		clear_sp_write_flooding_count(iterator.sptep);
	walk_shadow_page_lockless_end(vcpu);
}

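/*
 * Note on the token layout below (illustration): the per-vCPU allocation
 * counter is packed into bits 31:12 of the token and the vcpu_id into bits
 * 11:0, e.g. the 3rd allocation on vCPU 5 would yield (3 << 12) | 5.
 * Resetting the counter to 1 whenever (id << 12) would be 0 keeps the
 * token itself from ever being 0.
 */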
static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
{
	/* make sure the token value is not 0 */
	u32 id = vcpu->arch.apf.id;

	if (id << 12 == 0)
		vcpu->arch.apf.id = 1;

	return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
}

static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				    gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

	arch.token = alloc_apf_token(vcpu);
	arch.gfn = gfn;
	arch.direct_map = vcpu->arch.mmu->direct_map;
	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);

	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}

static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, int *r)
{
	struct kvm_memory_slot *slot = fault->slot;
	bool async;

	/*
	 * Retry the page fault if the gfn hit a memslot that is being deleted
	 * or moved.  This ensures any existing SPTEs for the old memslot will
	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
	 */
	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
		goto out_retry;

	if (!kvm_is_visible_memslot(slot)) {
		/* Don't expose private memslots to L2. */
		if (is_guest_mode(vcpu)) {
			fault->slot = NULL;
			fault->pfn = KVM_PFN_NOSLOT;
			fault->map_writable = false;
			return false;
		}
		/*
		 * If the APIC access page exists but is disabled, go directly
		 * to emulation without caching the MMIO access or creating a
		 * MMIO SPTE.  That way the cache doesn't need to be purged
		 * when the AVIC is re-enabled.
		 */
		if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
		    !kvm_apicv_activated(vcpu->kvm)) {
			*r = RET_PF_EMULATE;
			return true;
		}
	}

	async = false;
	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, &async,
					  fault->write, &fault->map_writable,
					  &fault->hva);
	if (!async)
		return false; /* *pfn has correct page already */

	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
			trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			goto out_retry;
		} else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn))
			goto out_retry;
	}

	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
					  fault->write, &fault->map_writable,
					  &fault->hva);
	return false;

out_retry:
	*r = RET_PF_RETRY;
	return true;
}
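
/*
 * Note: kvm_faultin_pfn() resolves the pfn in two passes.  The first
 * __gfn_to_pfn_memslot() call is allowed to bail out (async == true) when
 * the host page is not immediately available; only if an async page fault
 * cannot be queued does the second, blocking call wait for the page to be
 * faulted in synchronously.
 */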

/*
 * Returns true if the page fault is stale and needs to be retried, i.e. if the
 * root was invalidated by a memslot update or a relevant mmu_notifier fired.
 */
static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
				struct kvm_page_fault *fault, int mmu_seq)
{
	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);

	/* Special roots, e.g. pae_root, are not backed by shadow pages. */
	if (sp && is_obsolete_sp(vcpu->kvm, sp))
		return true;

	/*
	 * Roots without an associated shadow page are considered invalid if
	 * there is a pending request to free obsolete roots.  The request is
	 * only a hint that the current root _may_ be obsolete and needs to be
	 * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
	 * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
	 * to reload even if no vCPU is actively using the root.
	 */
	if (!sp && kvm_test_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
		return true;

	return fault->slot &&
	       mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
}

static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);

	unsigned long mmu_seq;
	int r;

	fault->gfn = fault->addr >> PAGE_SHIFT;
	fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);

	if (page_fault_handle_page_track(vcpu, fault))
		return RET_PF_EMULATE;

	r = fast_page_fault(vcpu, fault);
	if (r != RET_PF_INVALID)
		return r;

	r = mmu_topup_memory_caches(vcpu, false);
	if (r)
		return r;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (kvm_faultin_pfn(vcpu, fault, &r))
		return r;

	if (handle_abnormal_pfn(vcpu, fault, ACC_ALL, &r))
		return r;

	r = RET_PF_RETRY;

	if (is_tdp_mmu_fault)
		read_lock(&vcpu->kvm->mmu_lock);
	else
		write_lock(&vcpu->kvm->mmu_lock);

	if (is_page_fault_stale(vcpu, fault, mmu_seq))
		goto out_unlock;

	r = make_mmu_pages_available(vcpu);
	if (r)
		goto out_unlock;

	if (is_tdp_mmu_fault)
		r = kvm_tdp_mmu_map(vcpu, fault);
	else
		r = __direct_map(vcpu, fault);

out_unlock:
	if (is_tdp_mmu_fault)
		read_unlock(&vcpu->kvm->mmu_lock);
	else
		write_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(fault->pfn);
	return r;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
				struct kvm_page_fault *fault)
{
	pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code);

	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
	fault->max_level = PG_LEVEL_2M;
	return direct_page_fault(vcpu, fault);
}

int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len)
{
	int r = 1;
	u32 flags = vcpu->arch.apf.host_apf_flags;

#ifndef CONFIG_X86_64
	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
	if (WARN_ON_ONCE(fault_address >> 32))
		return -EFAULT;
#endif

	vcpu->arch.l1tf_flush_l1d = true;
	if (!flags) {
		trace_kvm_page_fault(fault_address, error_code);

		if (kvm_event_needs_reinjection(vcpu))
			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
				insn_len);
	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		vcpu->arch.apf.host_apf_flags = 0;
		local_irq_disable();
		kvm_async_pf_task_wait_schedule(fault_address);
		local_irq_enable();
	} else {
		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	while (fault->max_level > PG_LEVEL_4K) {
		int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
		gfn_t base = (fault->addr >> PAGE_SHIFT) & ~(page_num - 1);

		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
			break;

		--fault->max_level;
	}

	return direct_page_fault(vcpu, fault);
}

4142
static void nonpaging_init_context(struct kvm_mmu *context)
A
Avi Kivity 已提交
4143 4144 4145
{
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
4146
	context->sync_page = nonpaging_sync_page;
4147
	context->invlpg = NULL;
4148
	context->direct_map = true;
A
Avi Kivity 已提交
4149 4150
}

4151
static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4152 4153
				  union kvm_mmu_page_role role)
{
4154
	return (role.direct || pgd == root->pgd) &&
4155
	       VALID_PAGE(root->hpa) &&
4156
	       role.word == to_shadow_page(root->hpa)->role.word;
4157 4158
}

4159
/*
4160 4161 4162 4163 4164 4165
 * Find out if a previously cached root matching the new pgd/role is available,
 * and insert the current root as the MRU in the cache.
 * If a matching root is found, it is assigned to kvm_mmu->root and
 * true is returned.
 * If no match is found, kvm_mmu->root is left invalid, the LRU root is
 * evicted to make room for the current root, and false is returned.
4166
 */
4167 4168 4169
static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu,
					      gpa_t new_pgd,
					      union kvm_mmu_page_role new_role)
4170 4171 4172
{
	uint i;

4173
	if (is_root_usable(&mmu->root, new_pgd, new_role))
4174 4175
		return true;

4176
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
4177 4178 4179 4180 4181 4182 4183 4184
		/*
		 * The swaps end up rotating the cache like this:
		 *   C   0 1 2 3   (on entry to the function)
		 *   0   C 1 2 3
		 *   1   C 0 2 3
		 *   2   C 0 1 3
		 *   3   C 0 1 2   (on exit from the loop)
		 */
4185 4186
		swap(mmu->root, mmu->prev_roots[i]);
		if (is_root_usable(&mmu->root, new_pgd, new_role))
4187
			return true;
4188 4189
	}

4190 4191
	kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
	return false;
4192 4193
}

/*
 * Find out if a previously cached root matching the new pgd/role is available.
 * On entry, mmu->root is invalid.
 * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
 * of the cache becomes invalid, and true is returned.
 * If no match is found, kvm_mmu->root is left invalid and false is returned.
 */
static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu,
					     gpa_t new_pgd,
					     union kvm_mmu_page_role new_role)
{
	uint i;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (is_root_usable(&mmu->prev_roots[i], new_pgd, new_role))
			goto hit;

	return false;

hit:
	swap(mmu->root, mmu->prev_roots[i]);
	/* Bubble up the remaining roots.  */
	for (; i < KVM_MMU_NUM_PREV_ROOTS - 1; i++)
		mmu->prev_roots[i] = mmu->prev_roots[i + 1];
	mmu->prev_roots[i].hpa = INVALID_PAGE;
	return true;
}
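
/*
 * For illustration: with prev_roots = {A, B, C, D} and a hit on C (i == 2),
 * the swap above moves C into mmu->root (which was invalid on entry) and the
 * bubble-up loop compacts the cache to {A, B, D, INVALID}.
 */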

static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
			    gpa_t new_pgd, union kvm_mmu_page_role new_role)
{
4225
	/*
4226
	 * For now, limit the caching to 64-bit hosts+VMs in order to avoid
4227 4228 4229
	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
	 * later if necessary.
	 */
4230 4231
	if (VALID_PAGE(mmu->root.hpa) && !to_shadow_page(mmu->root.hpa))
		kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
4232

4233 4234 4235 4236
	if (VALID_PAGE(mmu->root.hpa))
		return cached_root_find_and_keep_current(kvm, mmu, new_pgd, new_role);
	else
		return cached_root_find_without_current(kvm, mmu, new_pgd, new_role);
A
Avi Kivity 已提交
4237 4238
}

4239
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
A
Avi Kivity 已提交
4240
{
4241
	struct kvm_mmu *mmu = vcpu->arch.mmu;
4242
	union kvm_mmu_page_role new_role = mmu->mmu_role.base;
4243

4244 4245
	if (!fast_pgd_switch(vcpu->kvm, mmu, new_pgd, new_role)) {
		/* kvm_mmu_ensure_valid_pgd will set up a new root.  */
4246 4247 4248 4249 4250 4251
		return;
	}

	/*
	 * It's possible that the cached previous root page is obsolete because
	 * of a change in the MMU generation number. However, changing the
4252 4253
	 * generation number is accompanied by KVM_REQ_MMU_FREE_OBSOLETE_ROOTS,
	 * which will free the root set here and allocate a new one.
4254 4255 4256
	 */
	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);

4257
	if (force_flush_and_sync_on_reuse) {
4258 4259
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4260
	}
4261 4262 4263 4264 4265 4266 4267 4268 4269

	/*
	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
	 * switching to a new CR3, that GVA->GPA mapping may no longer be
	 * valid. So clear any cached MMIO info even when we don't need to sync
	 * the shadow page tables.
	 */
	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);

4270 4271 4272 4273 4274 4275
	/*
	 * If this is a direct root page, it doesn't have a write flooding
	 * count. Otherwise, clear the write flooding count.
	 */
	if (!new_role.direct)
		__clear_sp_write_flooding_count(
4276
				to_shadow_page(vcpu->arch.mmu->root.hpa));
A
Avi Kivity 已提交
4277
}
4278
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4279

4280 4281
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
4282
	return kvm_read_cr3(vcpu);
4283 4284
}

4285
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4286
			   unsigned int access)
4287 4288 4289 4290 4291 4292 4293
{
	if (unlikely(is_mmio_spte(*sptep))) {
		if (gfn != get_mmio_spte_gfn(*sptep)) {
			mmu_spte_clear_no_track(sptep);
			return true;
		}

4294
		mark_mmio_spte(vcpu, sptep, gfn, access);
4295 4296 4297 4298 4299 4300
		return true;
	}

	return false;
}

4301 4302 4303 4304 4305
#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE

A
Avi Kivity 已提交
4306 4307 4308 4309 4310 4311 4312 4313
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

4314
static void
4315
__reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
4316
			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4317
			bool pse, bool amd)
4318
{
4319
	u64 gbpages_bit_rsvd = 0;
4320
	u64 nonleaf_bit8_rsvd = 0;
4321
	u64 high_bits_rsvd;
4322

4323
	rsvd_check->bad_mt_xwr = 0;
4324

4325
	if (!gbpages)
4326
		gbpages_bit_rsvd = rsvd_bits(7, 7);
4327

4328 4329 4330 4331 4332 4333 4334 4335 4336
	if (level == PT32E_ROOT_LEVEL)
		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
	else
		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);

	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
	if (!nx)
		high_bits_rsvd |= rsvd_bits(63, 63);

4337 4338 4339 4340
	/*
	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
	 * leaf entries) on AMD CPUs only.
	 */
4341
	if (amd)
4342 4343
		nonleaf_bit8_rsvd = rsvd_bits(8, 8);

4344
	switch (level) {
4345 4346
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
4347 4348 4349 4350
		rsvd_check->rsvd_bits_mask[0][1] = 0;
		rsvd_check->rsvd_bits_mask[0][0] = 0;
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4351

4352
		if (!pse) {
4353
			rsvd_check->rsvd_bits_mask[1][1] = 0;
4354 4355 4356
			break;
		}

4357 4358
		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
4359
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4360 4361
		else
			/* 32 bits PSE 4MB page */
4362
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4363 4364
		break;
	case PT32E_ROOT_LEVEL:
4365 4366 4367 4368 4369 4370 4371 4372
		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
						   high_bits_rsvd |
						   rsvd_bits(5, 8) |
						   rsvd_bits(1, 2);	/* PDPTE */
		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
						   rsvd_bits(13, 20);	/* large page */
4373 4374
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4375
		break;
4376
	case PT64_ROOT_5LEVEL:
4377 4378 4379
		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
						   nonleaf_bit8_rsvd |
						   rsvd_bits(7, 7);
4380 4381
		rsvd_check->rsvd_bits_mask[1][4] =
			rsvd_check->rsvd_bits_mask[0][4];
4382
		fallthrough;
4383
	case PT64_ROOT_4LEVEL:
4384 4385 4386 4387 4388 4389 4390
		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
						   nonleaf_bit8_rsvd |
						   rsvd_bits(7, 7);
		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
						   gbpages_bit_rsvd;
		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4391 4392
		rsvd_check->rsvd_bits_mask[1][3] =
			rsvd_check->rsvd_bits_mask[0][3];
4393 4394 4395 4396 4397
		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
						   gbpages_bit_rsvd |
						   rsvd_bits(13, 29);
		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
						   rsvd_bits(13, 20); /* large page */
4398 4399
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4400 4401 4402 4403
		break;
	}
}

4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418
static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
{
	/*
	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
	 * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
	 * walk for performance and complexity reasons.  Not to mention KVM
	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
	 * KVM once a TDP translation is installed.  Mimic hardware behavior so
	 * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
	 */
	return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
			     guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
}

4419 4420 4421
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
4422
	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
4423
				vcpu->arch.reserved_gpa_bits,
4424
				context->root_level, is_efer_nx(context),
4425
				guest_can_use_gbpages(vcpu),
4426
				is_cr4_pse(context),
4427
				guest_cpuid_is_amd_or_hygon(vcpu));
4428 4429
}

4430 4431
static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
4432
			    u64 pa_bits_rsvd, bool execonly, int huge_page_level)
4433
{
4434
	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4435
	u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
4436
	u64 bad_mt_xwr;
4437

4438 4439 4440 4441 4442
	if (huge_page_level < PG_LEVEL_1G)
		large_1g_rsvd = rsvd_bits(7, 7);
	if (huge_page_level < PG_LEVEL_2M)
		large_2m_rsvd = rsvd_bits(7, 7);

4443 4444
	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
4445 4446
	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
4447
	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4448 4449

	/* large page */
4450
	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4451
	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4452 4453
	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
4454
	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4455

4456 4457 4458 4459 4460 4461 4462 4463
	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
	if (!execonly) {
		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4464
	}
4465
	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4466 4467
}

4468
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4469
		struct kvm_mmu *context, bool execonly, int huge_page_level)
4470 4471
{
	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4472 4473
				    vcpu->arch.reserved_gpa_bits, execonly,
				    huge_page_level);
4474 4475
}

4476 4477 4478 4479 4480
static inline u64 reserved_hpa_bits(void)
{
	return rsvd_bits(shadow_phys_bits, 63);
}

/*
 * The page table on the host is the shadow page table for the page table
 * in the guest or an AMD nested guest; its MMU features follow the guest's
 * features exactly.
 */
static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
					struct kvm_mmu *context)
{
4489 4490 4491 4492 4493 4494 4495 4496
	/*
	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
	 * The iTLB multi-hit workaround can be toggled at any time, so assume
	 * NX can be used by any non-nested shadow MMU to avoid having to reset
	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
	 */
4497
	bool uses_nx = is_efer_nx(context) || !tdp_enabled;
4498 4499 4500 4501 4502

	/* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */
	bool is_amd = true;
	/* KVM doesn't use 2-level page tables for the shadow MMU. */
	bool is_pse = false;
4503 4504
	struct rsvd_bits_validate *shadow_zero_check;
	int i;
4505

4506 4507
	WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);

4508
	shadow_zero_check = &context->shadow_zero_check;
4509
	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4510
				context->shadow_root_level, uses_nx,
4511
				guest_can_use_gbpages(vcpu), is_pse, is_amd);
4512 4513 4514 4515 4516 4517 4518 4519 4520

	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}

4521 4522
}

4523 4524 4525 4526 4527 4528
static inline bool boot_cpu_is_amd(void)
{
	WARN_ON_ONCE(!tdp_enabled);
	return shadow_x_mask == 0;
}

/*
 * The direct page tables used on the host: use as many MMU features as
 * possible; however, KVM currently does not do execution-protection here.
 */
static void
reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
{
4536 4537 4538 4539 4540
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	shadow_zero_check = &context->shadow_zero_check;

4541
	if (boot_cpu_is_amd())
4542
		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4543
					context->shadow_root_level, false,
4544
					boot_cpu_has(X86_FEATURE_GBPAGES),
4545
					false, true);
4546
	else
4547
		__reset_rsvds_bits_mask_ept(shadow_zero_check,
4548 4549
					    reserved_hpa_bits(), false,
					    max_huge_page_level);
4550

4551 4552 4553 4554 4555 4556 4557
	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}
4558 4559 4560 4561 4562 4563 4564
}

/*
 * Same as the comments in reset_shadow_zero_bits_mask(), except this is
 * the shadow page table for an Intel nested guest.
 */
static void
reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
4568 4569
				    reserved_hpa_bits(), execonly,
				    max_huge_page_level);
4570 4571
}

4572 4573 4574 4575 4576 4577 4578 4579 4580 4581
#define BYTE_MASK(access) \
	((1 & (access) ? 2 : 0) | \
	 (2 & (access) ? 4 : 0) | \
	 (3 & (access) ? 8 : 0) | \
	 (4 & (access) ? 16 : 0) | \
	 (5 & (access) ? 32 : 0) | \
	 (6 & (access) ? 64 : 0) | \
	 (7 & (access) ? 128 : 0))
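
/*
 * For illustration: BYTE_MASK(ACC_EXEC_MASK) == 0xaa,
 * BYTE_MASK(ACC_WRITE_MASK) == 0xcc and BYTE_MASK(ACC_USER_MASK) == 0xf0,
 * i.e. bit N of BYTE_MASK(access) is set iff the 3-bit pte access
 * combination N includes one of the bits in @access.  (Assumes the usual
 * ACC_EXEC_MASK/ACC_WRITE_MASK/ACC_USER_MASK values of 1, 2 and 4.)
 */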


4582
static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
4583
{
4584 4585 4586 4587 4588 4589
	unsigned byte;

	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
	const u8 u = BYTE_MASK(ACC_USER_MASK);

4590 4591 4592
	bool cr4_smep = is_cr4_smep(mmu);
	bool cr4_smap = is_cr4_smap(mmu);
	bool cr0_wp = is_cr0_wp(mmu);
4593
	bool efer_nx = is_efer_nx(mmu);
4594 4595

	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4596 4597
		unsigned pfec = byte << 1;

F
Feng Wu 已提交
4598
		/*
4599 4600
		 * Each "*f" variable has a 1 bit for each UWX value
		 * that causes a fault with the given PFEC.
F
Feng Wu 已提交
4601
		 */
4602

4603
		/* Faults from writes to non-writable pages */
4604
		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4605
		/* Faults from user mode accesses to supervisor pages */
4606
		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4607
		/* Faults from fetches of non-executable pages*/
4608
		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4609 4610 4611 4612 4613 4614 4615 4616 4617 4618
		/* Faults from kernel mode fetches of user pages */
		u8 smepf = 0;
		/* Faults from kernel mode accesses of user pages */
		u8 smapf = 0;

		if (!ept) {
			/* Faults from kernel mode accesses to user pages */
			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;

			/* Not really needed: !nx will cause pte.nx to fault */
4619
			if (!efer_nx)
4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633
				ff = 0;

			/* Allow supervisor writes if !cr0.wp */
			if (!cr0_wp)
				wf = (pfec & PFERR_USER_MASK) ? wf : 0;

			/* Disallow supervisor fetches of user code if cr4.smep */
			if (cr4_smep)
				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;

			/*
			 * SMAP:kernel-mode data accesses from user-mode
			 * mappings should fault. A fault is considered
			 * as a SMAP violation if all of the following
P
Peng Hao 已提交
4634
			 * conditions are true:
4635 4636 4637
			 *   - X86_CR4_SMAP is set in CR4
			 *   - A user page is accessed
			 *   - The access is not a fetch
4638 4639
			 *   - The access is supervisor mode
			 *   - If implicit supervisor access or X86_EFLAGS_AC is clear
4640
			 *
4641 4642
			 * Here, we cover the first four conditions.
			 * The fifth is computed dynamically in permission_fault();
4643 4644 4645 4646 4647
			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
			 * *not* subject to SMAP restrictions.
			 */
			if (cr4_smap)
				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4648
		}
4649 4650

		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4651 4652 4653
	}
}
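
/*
 * Sketch of how the table is consumed (assuming the permission_fault()
 * helper in mmu.h is unchanged): the page fault error code, possibly with
 * PFERR_RSVD_MASK folded in to encode the "not subject to SMAP" case, is
 * shifted right by one to drop the Present bit and used to index
 * mmu->permissions[]; the selected byte is then shifted right by the pte's
 * UWX access bits and bit 0 of the result indicates a fault.
 */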

4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677
/*
* PKU is an additional mechanism by which the paging controls access to
* user-mode addresses based on the value in the PKRU register.  Protection
* key violations are reported through a bit in the page fault error code.
* Unlike other bits of the error code, the PK bit is not known at the
* call site of e.g. gva_to_gpa; it must be computed directly in
* permission_fault based on two bits of PKRU, on some machine state (CR4,
* CR0, EFER, CPL), and on other bits of the error code and the page tables.
*
* In particular the following conditions come from the error code, the
* page tables and the machine state:
* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
* - PK is always zero if U=0 in the page tables
* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
*
* The PKRU bitmask caches the result of these four conditions.  The error
* code (minus the P bit) and the page table's U bit form an index into the
* PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
* with the two bits of the PKRU register corresponding to the protection key.
* For the first three conditions above the bits will be 00, thus masking
* away both AD and WD.  For all reads or if the last condition holds, WD
* only will be masked away.
*/
4678
static void update_pkru_bitmask(struct kvm_mmu *mmu)
4679 4680 4681 4682
{
	unsigned bit;
	bool wp;

4683 4684 4685
	mmu->pkru_mask = 0;

	if (!is_cr4_pke(mmu))
4686 4687
		return;

4688
	wp = is_cr0_wp(mmu);
4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721

	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
		unsigned pfec, pkey_bits;
		bool check_pkey, check_write, ff, uf, wf, pte_user;

		pfec = bit << 1;
		ff = pfec & PFERR_FETCH_MASK;
		uf = pfec & PFERR_USER_MASK;
		wf = pfec & PFERR_WRITE_MASK;

		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
		pte_user = pfec & PFERR_RSVD_MASK;

		/*
		 * Only need to check the access which is not an
		 * instruction fetch and is to a user page.
		 */
		check_pkey = (!ff && pte_user);
		/*
		 * write access is controlled by PKRU if it is a
		 * user access or CR0.WP = 1.
		 */
		check_write = check_pkey && wf && (uf || wp);

		/* PKRU.AD stops both read and write access. */
		pkey_bits = !!check_pkey;
		/* PKRU.WD stops write access. */
		pkey_bits |= (!!check_write) << 1;

		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
	}
}
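
/*
 * Worked example (illustrative): with CR4.PKE=1, for the index representing
 * a user-mode write (PFEC.W=1, PFEC.U=1) to a user page (the RSVD position
 * in the index stands in for the pte's U bit), both stored bits are set, so
 * PKRU.AD and PKRU.WD of the page's key can each block the access; for a
 * user-mode read of a user page, only the AD bit is checked.
 */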

4722 4723
static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
					struct kvm_mmu *mmu)
A
Avi Kivity 已提交
4724
{
4725 4726
	if (!is_cr0_pg(mmu))
		return;
4727

4728 4729 4730
	reset_rsvds_bits_mask(vcpu, mmu);
	update_permission_bitmask(mmu, false);
	update_pkru_bitmask(mmu);
A
Avi Kivity 已提交
4731 4732
}

4733
static void paging64_init_context(struct kvm_mmu *context)
A
Avi Kivity 已提交
4734 4735 4736
{
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
4737
	context->sync_page = paging64_sync_page;
M
Marcelo Tosatti 已提交
4738
	context->invlpg = paging64_invlpg;
4739
	context->direct_map = false;
A
Avi Kivity 已提交
4740 4741
}

4742
static void paging32_init_context(struct kvm_mmu *context)
A
Avi Kivity 已提交
4743 4744 4745
{
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
4746
	context->sync_page = paging32_sync_page;
M
Marcelo Tosatti 已提交
4747
	context->invlpg = paging32_invlpg;
4748
	context->direct_map = false;
A
Avi Kivity 已提交
4749 4750
}

4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785
static union kvm_mmu_role
kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
{
	union kvm_mmu_role role = {0};

	role.base.access = ACC_ALL;
	role.base.smm = is_smm(vcpu);
	role.base.guest_mode = is_guest_mode(vcpu);
	role.ext.valid = 1;

	if (!____is_cr0_pg(regs)) {
		role.base.direct = 1;
		return role;
	}

	role.base.efer_nx = ____is_efer_nx(regs);
	role.base.cr0_wp = ____is_cr0_wp(regs);
	role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
	role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
	role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
	role.base.level = role_regs_to_root_level(regs);

	role.ext.cr0_pg = 1;
	role.ext.cr4_pae = ____is_cr4_pae(regs);
	role.ext.cr4_smep = ____is_cr4_smep(regs);
	role.ext.cr4_smap = ____is_cr4_smap(regs);
	role.ext.cr4_pse = ____is_cr4_pse(regs);

	/* PKEY and LA57 are active iff long mode is active. */
	role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
	role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
	role.ext.efer_lma = ____is_efer_lma(regs);
	return role;
}

4786
static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
4787
						   const struct kvm_mmu_role_regs *regs)
4788 4789 4790 4791
{
	union kvm_mmu_role role = {0};

	role.base.access = ACC_ALL;
4792
	if (____is_cr0_pg(regs)) {
4793
		role.ext.cr0_pg = 1;
4794 4795
		role.base.efer_nx = ____is_efer_nx(regs);
		role.base.cr0_wp = ____is_cr0_wp(regs);
4796 4797 4798 4799 4800 4801 4802 4803 4804 4805

		role.ext.cr4_pae = ____is_cr4_pae(regs);
		role.ext.cr4_smep = ____is_cr4_smep(regs);
		role.ext.cr4_smap = ____is_cr4_smap(regs);
		role.ext.cr4_pse = ____is_cr4_pse(regs);

		/* PKEY and LA57 are active iff long mode is active. */
		role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
		role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
		role.ext.efer_lma = ____is_efer_lma(regs);
4806
	}
4807 4808
	role.base.smm = is_smm(vcpu);
	role.base.guest_mode = is_guest_mode(vcpu);
4809
	role.ext.valid = 1;
4810 4811 4812 4813

	return role;
}

4814 4815
static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{
4816 4817 4818 4819
	/* tdp_root_level is architecture forced level, use it if nonzero */
	if (tdp_root_level)
		return tdp_root_level;

4820
	/* Use 5-level TDP if and only if it's useful/necessary. */
4821
	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4822 4823
		return 4;

4824
	return max_tdp_level;
4825 4826
}

4827
static union kvm_mmu_role
4828
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
4829
				const struct kvm_mmu_role_regs *regs)
4830
{
4831
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
4832

4833
	role.base.ad_disabled = (shadow_accessed_mask == 0);
4834
	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4835
	role.base.direct = true;
4836
	role.base.has_4_byte_gpte = false;
4837 4838 4839 4840

	return role;
}

4841 4842
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
			     const struct kvm_mmu_role_regs *regs)
4843
{
4844
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4845 4846
	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
	union kvm_mmu_role mmu_role =
4847
		kvm_calc_tdp_mmu_root_page_role(vcpu, regs);
4848

4849 4850
	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
	    mmu_role.as_u64 == context->mmu_role.as_u64)
4851 4852
		return;

4853 4854
	context->cpu_role.as_u64 = cpu_role.as_u64;
	context->mmu_role.as_u64 = mmu_role.as_u64;
4855
	context->page_fault = kvm_tdp_page_fault;
4856
	context->sync_page = nonpaging_sync_page;
4857
	context->invlpg = NULL;
4858
	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4859
	context->direct_map = true;
4860
	context->get_guest_pgd = get_cr3;
4861
	context->get_pdptr = kvm_pdptr_read;
4862
	context->inject_page_fault = kvm_inject_page_fault;
4863
	context->root_level = role_regs_to_root_level(regs);
4864

4865
	if (!is_cr0_pg(context))
4866
		context->gva_to_gpa = nonpaging_gva_to_gpa;
4867
	else if (is_cr4_pae(context))
4868
		context->gva_to_gpa = paging64_gva_to_gpa;
4869
	else
4870
		context->gva_to_gpa = paging32_gva_to_gpa;
4871

4872
	reset_guest_paging_metadata(vcpu, context);
4873
	reset_tdp_shadow_zero_bits_mask(context);
4874 4875
}

4876
static union kvm_mmu_role
4877
kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
4878
				      const struct kvm_mmu_role_regs *regs)
4879
{
4880
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
4881

4882 4883
	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
4884
	role.base.has_4_byte_gpte = ____is_cr0_pg(regs) && !____is_cr4_pae(regs);
4885

4886 4887 4888 4889
	return role;
}

static union kvm_mmu_role
4890
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
4891
				   const struct kvm_mmu_role_regs *regs)
4892 4893
{
	union kvm_mmu_role role =
4894
		kvm_calc_shadow_root_page_role_common(vcpu, regs);
4895

4896
	role.base.direct = !____is_cr0_pg(regs);
4897

4898
	if (!____is_efer_lma(regs))
4899
		role.base.level = PT32E_ROOT_LEVEL;
4900
	else if (____is_cr4_la57(regs))
4901
		role.base.level = PT64_ROOT_5LEVEL;
4902
	else
4903
		role.base.level = PT64_ROOT_4LEVEL;
4904 4905 4906 4907

	return role;
}

4908
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4909 4910
				    union kvm_mmu_role cpu_role,
				    union kvm_mmu_role mmu_role)
4911
{
4912 4913
	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
	    mmu_role.as_u64 == context->mmu_role.as_u64)
4914
		return;
4915

4916 4917
	context->cpu_role.as_u64 = cpu_role.as_u64;
	context->mmu_role.as_u64 = mmu_role.as_u64;
4918

4919
	if (!is_cr0_pg(context))
4920
		nonpaging_init_context(context);
4921
	else if (is_cr4_pae(context))
4922
		paging64_init_context(context);
A
Avi Kivity 已提交
4923
	else
4924
		paging32_init_context(context);
4925
	context->root_level = cpu_role.base.level;
4926

4927
	reset_guest_paging_metadata(vcpu, context);
4928
	context->shadow_root_level = mmu_role.base.level;
4929

4930
	reset_shadow_zero_bits_mask(vcpu, context);
4931
}
4932

4933
static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
4934
				const struct kvm_mmu_role_regs *regs)
4935
{
4936
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4937 4938
	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
	union kvm_mmu_role mmu_role =
4939
		kvm_calc_shadow_mmu_root_page_role(vcpu, regs);
4940

4941
	shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
4942 4943
}

4944
static union kvm_mmu_role
4945
kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
4946
				   const struct kvm_mmu_role_regs *regs)
4947 4948
{
	union kvm_mmu_role role =
4949
		kvm_calc_shadow_root_page_role_common(vcpu, regs);
4950 4951

	role.base.direct = false;
4952
	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4953 4954 4955 4956

	return role;
}

4957 4958
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
4959
{
4960
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4961 4962
	struct kvm_mmu_role_regs regs = {
		.cr0 = cr0,
4963
		.cr4 = cr4 & ~X86_CR4_PKE,
4964 4965
		.efer = efer,
	};
4966 4967
	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
	union kvm_mmu_role mmu_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
4968

4969
	shadow_mmu_init_context(vcpu, context, cpu_role, mmu_role);
4970
	kvm_mmu_new_pgd(vcpu, nested_cr3);
4971 4972
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4973

4974 4975
static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4976
				   bool execonly, u8 level)
4977
{
4978
	union kvm_mmu_role role = {0};
4979

4980 4981 4982 4983 4984
	/*
	 * KVM does not support SMM transfer monitors, and consequently does not
	 * support the "entry to SMM" control either.  role.base.smm is always 0.
	 */
	WARN_ON_ONCE(is_smm(vcpu));
4985
	role.base.level = level;
4986
	role.base.has_4_byte_gpte = false;
4987 4988 4989 4990
	role.base.direct = false;
	role.base.ad_disabled = !accessed_dirty;
	role.base.guest_mode = true;
	role.base.access = ACC_ALL;
4991

4992
	role.ext.word = 0;
4993
	role.ext.execonly = execonly;
4994
	role.ext.valid = 1;
4995 4996 4997 4998

	return role;
}

4999
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
5000 5001
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp)
N
Nadav Har'El 已提交
5002
{
5003
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5004
	u8 level = vmx_eptp_page_walk_level(new_eptp);
5005
	union kvm_mmu_role new_mode =
5006
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
5007
						   execonly, level);
5008

5009 5010 5011 5012
	if (new_mode.as_u64 != context->cpu_role.as_u64) {
		/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
		context->cpu_role.as_u64 = new_mode.as_u64;
		context->mmu_role.as_u64 = new_mode.as_u64;
5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027

		context->shadow_root_level = level;

		context->ept_ad = accessed_dirty;
		context->page_fault = ept_page_fault;
		context->gva_to_gpa = ept_gva_to_gpa;
		context->sync_page = ept_sync_page;
		context->invlpg = ept_invlpg;
		context->root_level = level;
		context->direct_map = false;
		update_permission_bitmask(context, true);
		context->pkru_mask = 0;
		reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
		reset_ept_shadow_zero_bits_mask(context, execonly);
	}
5028

5029
	kvm_mmu_new_pgd(vcpu, new_eptp);
N
Nadav Har'El 已提交
5030 5031 5032
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

5033 5034
static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
			     const struct kvm_mmu_role_regs *regs)
5035
{
5036
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
5037

5038
	kvm_init_shadow_mmu(vcpu, regs);
5039

5040
	context->get_guest_pgd     = get_cr3;
5041 5042
	context->get_pdptr         = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
A
Avi Kivity 已提交
5043 5044
}

5045 5046
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
				const struct kvm_mmu_role_regs *regs)
5047
{
5048
	union kvm_mmu_role new_mode = kvm_calc_cpu_role(vcpu, regs);
5049 5050
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

5051
	if (new_mode.as_u64 == g_context->cpu_role.as_u64)
5052 5053
		return;

5054
	g_context->cpu_role.as_u64   = new_mode.as_u64;
5055
	g_context->get_guest_pgd     = get_cr3;
5056
	g_context->get_pdptr         = kvm_pdptr_read;
5057
	g_context->inject_page_fault = kvm_inject_page_fault;
5058
	g_context->root_level        = new_mode.base.level;
5059

5060 5061 5062 5063 5064 5065
	/*
	 * L2 page tables are never shadowed, so there is no need to sync
	 * SPTEs.
	 */
	g_context->invlpg            = NULL;

5066
	/*
5067
	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5068 5069 5070 5071 5072
	 * L1's nested page tables (e.g. EPT12). The nested translation
	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
	 * L2's page tables as the first level of translation and L1's
	 * nested page tables as the second level of translation. Basically
	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
5073
	 */
5074
	if (!is_paging(vcpu))
5075
		g_context->gva_to_gpa = nonpaging_gva_to_gpa;
5076
	else if (is_long_mode(vcpu))
5077
		g_context->gva_to_gpa = paging64_gva_to_gpa;
5078
	else if (is_pae(vcpu))
5079
		g_context->gva_to_gpa = paging64_gva_to_gpa;
5080
	else
5081
		g_context->gva_to_gpa = paging32_gva_to_gpa;
5082

5083
	reset_guest_paging_metadata(vcpu, g_context);
5084 5085
}

5086
void kvm_init_mmu(struct kvm_vcpu *vcpu)
5087
{
5088 5089
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);

5090
	if (mmu_is_nested(vcpu))
5091
		init_kvm_nested_mmu(vcpu, &regs);
5092
	else if (tdp_enabled)
5093
		init_kvm_tdp_mmu(vcpu, &regs);
5094
	else
5095
		init_kvm_softmmu(vcpu, &regs);
5096
}
5097
EXPORT_SYMBOL_GPL(kvm_init_mmu);
5098

5099 5100 5101 5102 5103
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	/*
	 * Invalidate all MMU roles to force them to reinitialize as CPUID
	 * information is factored into reserved bit calculations.
5104 5105 5106 5107 5108 5109 5110 5111
	 *
	 * Correctly handling multiple vCPU models with respect to paging and
	 * physical address properties in a single VM would require tracking
	 * all relevant CPUID information in kvm_mmu_page_role. That is very
	 * undesirable as it would increase the memory requirements for
	 * gfn_track (see struct kvm_mmu_page_role comments).  For now that
	 * problem is swept under the rug; KVM's CPUID API is horrific and
	 * it's all but impossible to solve it without introducing a new API.
5112 5113 5114 5115
	 */
	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
5116 5117 5118
	vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
	vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
	vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
5119
	kvm_mmu_reset_context(vcpu);
5120 5121

	/*
5122 5123
	 * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
	 * kvm_arch_vcpu_ioctl().
5124
	 */
5125
	KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
5126 5127
}

5128
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
5129
{
5130
	kvm_mmu_unload(vcpu);
5131
	kvm_init_mmu(vcpu);
A
Avi Kivity 已提交
5132
}
5133
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
A
Avi Kivity 已提交
5134 5135

int kvm_mmu_load(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
5136
{
5137 5138
	int r;

5139
	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
A
Avi Kivity 已提交
5140 5141
	if (r)
		goto out;
5142
	r = mmu_alloc_special_roots(vcpu);
A
Avi Kivity 已提交
5143 5144
	if (r)
		goto out;
5145
	if (vcpu->arch.mmu->direct_map)
5146 5147 5148
		r = mmu_alloc_direct_roots(vcpu);
	else
		r = mmu_alloc_shadow_roots(vcpu);
5149 5150
	if (r)
		goto out;
5151 5152 5153

	kvm_mmu_sync_roots(vcpu);

5154
	kvm_mmu_load_pgd(vcpu);
5155 5156 5157 5158 5159 5160 5161 5162

	/*
	 * Flush any TLB entries for the new root, the provenance of the root
	 * is unknown.  Even if KVM ensures there are no stale TLB entries
	 * for a freed root, in theory another hypervisor could have left
	 * stale entries.  Flushing on alloc also allows KVM to skip the TLB
	 * flush when freeing a root (see kvm_tdp_mmu_put_root()).
	 */
5163
	static_call(kvm_x86_flush_tlb_current)(vcpu);
5164 5165
out:
	return r;
A
Avi Kivity 已提交
5166
}
A
Avi Kivity 已提交
5167 5168 5169

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
5170 5171 5172
	struct kvm *kvm = vcpu->kvm;

	kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5173
	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5174
	kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5175
	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5176
	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
A
Avi Kivity 已提交
5177
}
A
Avi Kivity 已提交
5178

5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223
static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
{
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(root_hpa))
		return false;

	/*
	 * When freeing obsolete roots, treat roots as obsolete if they don't
	 * have an associated shadow page.  This does mean KVM will get false
	 * positives and free roots that don't strictly need to be freed, but
	 * such false positives are relatively rare:
	 *
	 *  (a) only PAE paging and nested NPT has roots without shadow pages
	 *  (b) remote reloads due to a memslot update obsoletes _all_ roots
	 *  (c) KVM doesn't track previous roots for PAE paging, and the guest
	 *      is unlikely to zap an in-use PGD.
	 */
	sp = to_shadow_page(root_hpa);
	return !sp || is_obsolete_sp(kvm, sp);
}

static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
{
	unsigned long roots_to_free = 0;
	int i;

	if (is_obsolete_root(kvm, mmu->root.hpa))
		roots_to_free |= KVM_MMU_ROOT_CURRENT;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (is_obsolete_root(kvm, mmu->root.hpa))
			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
	}

	if (roots_to_free)
		kvm_mmu_free_roots(kvm, mmu, roots_to_free);
}

void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
{
	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
	__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
}

5224 5225 5226 5227 5228 5229 5230 5231
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= shadow_nx_mask;
	new ^= shadow_nx_mask;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

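/*
 * Read the guest PTE that is being written so the new value can be examined;
 * a 4-byte write to a PAE page table is widened to cover the full 64-bit
 * gpte.  Returns 0 if the gpte cannot be read atomically.
 */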
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    int *bytes)
{
	u64 gentry = 0;
	int r;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode, since sptes are only updated
	 * when they have the same mode.
	 */
	if (is_pae(vcpu) && *bytes == 4) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
	}

	if (*bytes == 4 || *bytes == 8) {
		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
		if (r)
			gentry = 0;
	}

	return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{
	/*
	 * Skip write-flooding detection for the sp whose level is 1, because
	 * it can become unsync and then the guest page is no longer
	 * write-protected.
	 */
	if (sp->role.level == PG_LEVEL_4K)
		return false;

	atomic_inc(&sp->write_flooding_count);
	return atomic_read(&sp->write_flooding_count) >= 3;
}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{
	unsigned offset, pte_size, misaligned;

	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
		 gpa, bytes, sp->role.word);

	offset = offset_in_page(gpa);
	pte_size = sp->role.has_4_byte_gpte ? 4 : 8;

	/*
	 * Sometimes the OS only writes the last byte to update status
	 * bits; for example, in Linux the andb instruction is used in
	 * clear_bit().
	 */
	if (!(offset & (pte_size - 1)) && bytes == 1)
		return false;

	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
	misaligned |= bytes < 4;

	return misaligned;
}

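/*
 * Return a pointer to the shadow PTE(s) in @sp that correspond to the guest
 * PTE(s) written at @gpa, and the number of affected sptes in @nspte.
 * Returns NULL if the write hits a quadrant of the guest table that @sp does
 * not shadow.
 */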
static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
	unsigned page_offset, quadrant;
	u64 *spte;
	int level;

	page_offset = offset_in_page(gpa);
	level = sp->role.level;
	*nspte = 1;
	if (sp->role.has_4_byte_gpte) {
		page_offset <<= 1;	/* 32->64 */
		/*
		 * A 32-bit pde maps 4MB while the shadow pdes map
		 * only 2MB.  So we need to double the offset again
		 * and zap two pdes instead of one.
		 */
		if (level == PT32_ROOT_LEVEL) {
			page_offset &= ~7; /* kill rounding error */
			page_offset <<= 1;
			*nspte = 2;
		}
		quadrant = page_offset >> PAGE_SHIFT;
		page_offset &= ~PAGE_MASK;
		if (quadrant != sp->role.quadrant)
			return NULL;
	}

	spte = &sp->spt[page_offset / sizeof(*spte)];
	return spte;
}

static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
			      const u8 *new, int bytes,
			      struct kvm_page_track_notifier_node *node)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	int npte;
	bool flush = false;

	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can simply return.
	 */
	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
		return;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	/*
	 * No need to care whether the memory allocation is successful
	 * or not, since pte prefetch is skipped if it does not have
	 * enough objects in the cache.
	 */
	mmu_topup_memory_caches(vcpu, true);

	write_lock(&vcpu->kvm->mmu_lock);

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);

	++vcpu->kvm->stat.mmu_pte_write;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
		if (detect_write_misaligned(sp, gpa, bytes) ||
		      detect_write_flooding(sp)) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}

		spte = get_written_sptes(sp, gpa, &npte);
		if (!spte)
			continue;

		while (npte--) {
			entry = *spte;
			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
			if (gentry && sp->role.level != PG_LEVEL_4K)
				++vcpu->kvm->stat.mmu_pde_zapped;
			if (need_remote_flush(entry, *spte))
				flush = true;
			++spte;
		}
	}
	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
	write_unlock(&vcpu->kvm->mmu_lock);
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len)
{
	int r, emulation_type = EMULTYPE_PF;
	bool direct = vcpu->arch.mmu->direct_map;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
		return RET_PF_RETRY;

	r = RET_PF_INVALID;
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
		if (r == RET_PF_EMULATE)
			goto emulate;
	}

	if (r == RET_PF_INVALID) {
		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
					  lower_32_bits(error_code), false);
		if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
			return -EIO;
	}

	if (r < 0)
		return r;
	if (r != RET_PF_EMULATE)
		return 1;

	/*
	 * Before emulating the instruction, check if the error code
	 * was due to a RO violation while translating the guest page.
	 * This can occur when using nested virtualization with nested
	 * paging in both guests. If true, we simply unprotect the page
	 * and resume the guest.
	 */
	if (vcpu->arch.mmu->direct_map &&
	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
		return 1;
	}

	/*
	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
	 * optimistically try to just unprotect the page and let the processor
	 * re-execute the instruction that caused the page fault.  Do not allow
	 * retrying MMIO emulation, as it's not only pointless but could also
	 * cause us to enter an infinite loop because the processor will keep
	 * faulting on the non-existent MMIO address.  Retrying an instruction
	 * from a nested guest is also pointless and dangerous as we are only
	 * explicitly shadowing L1's page tables, i.e. unprotecting something
	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
	 */
	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
emulate:
	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
				       insn_len);
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

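/*
 * Invalidate the given GVA (a GPA when @mmu is the guest_mmu) in the root
 * identified by @root_hpa, or in the current root and all cached previous
 * roots when @root_hpa is INVALID_PAGE.
 */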
void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gva_t gva, hpa_t root_hpa)
{
	int i;

	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
	if (mmu != &vcpu->arch.guest_mmu) {
		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
		if (is_noncanonical_address(gva, vcpu))
			return;

		static_call(kvm_x86_flush_tlb_gva)(vcpu, gva);
	}

	if (!mmu->invlpg)
		return;

	if (root_hpa == INVALID_PAGE) {
		mmu->invlpg(vcpu, gva, mmu->root.hpa);

		/*
		 * INVLPG is required to invalidate any global mappings for the VA,
		 * irrespective of PCID.  Since it would take roughly the same amount
		 * of work to determine whether any of the prev_root mappings of the
		 * VA is marked global as it would to just sync the VA blindly, we
		 * might as well just always sync it.
		 *
		 * Mappings not reachable via the current cr3 or the prev_roots will be
		 * synced when switching to that cr3, so nothing needs to be done here
		 * for them.
		 */
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if (VALID_PAGE(mmu->prev_roots[i].hpa))
				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
	} else {
		mmu->invlpg(vcpu, gva, root_hpa);
	}
}

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

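/*
 * Invalidate @gva for the given @pcid: the current root is handled if it is
 * tagged with @pcid, as are any cached previous roots whose PCID matches; the
 * hardware TLB entry for @gva is flushed if any root was touched.
 */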
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	bool tlb_flush = false;
	uint i;

	if (pcid == kvm_get_active_pcid(vcpu)) {
		mmu->invlpg(vcpu, gva, mmu->root.hpa);
		tlb_flush = true;
	}

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
			tlb_flush = true;
		}
	}

	if (tlb_flush)
		static_call(kvm_x86_flush_tlb_gva)(vcpu, gva);

	++vcpu->stat.invlpg;

	/*
	 * Mappings not reachable via the current cr3 or the prev_roots will be
	 * synced when switching to that cr3, so nothing needs to be done here
	 * for them.
	 */
}

void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
		       int tdp_max_root_level, int tdp_huge_page_level)
{
	tdp_enabled = enable_tdp;
	tdp_root_level = tdp_forced_root_level;
	max_tdp_level = tdp_max_root_level;

	/*
	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
	 * the kernel is not.  But, KVM never creates a page size greater than
	 * what is used by the kernel for any given HVA, i.e. the kernel's
	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
	 */
	if (tdp_enabled)
		max_huge_page_level = tdp_huge_page_level;
	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
		max_huge_page_level = PG_LEVEL_1G;
	else
		max_huge_page_level = PG_LEVEL_2M;
}
EXPORT_SYMBOL_GPL(kvm_configure_mmu);

/* The return value indicates if tlb flush on all vcpus is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head,
				    const struct kvm_memory_slot *slot);

/* The caller should hold mmu-lock before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
			slot_level_handler fn, int start_level, int end_level,
			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
			bool flush)
{
	struct slot_rmap_walk_iterator iterator;

	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
			end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap, memslot);

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			if (flush && flush_on_yield) {
				kvm_flush_remote_tlbs_with_address(kvm,
						start_gfn,
						iterator.gfn - start_gfn + 1);
				flush = false;
			}
			cond_resched_rwlock_write(&kvm->mmu_lock);
		}
	}

	return flush;
}

static __always_inline bool
slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool flush_on_yield)
{
	return slot_handle_level_range(kvm, memslot, fn, start_level,
			end_level, memslot->base_gfn,
			memslot->base_gfn + memslot->npages - 1,
			flush_on_yield, false);
}

static __always_inline bool
slot_handle_level_4k(struct kvm *kvm, const struct kvm_memory_slot *memslot,
		     slot_level_handler fn, bool flush_on_yield)
{
	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
				 PG_LEVEL_4K, flush_on_yield);
}

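/*
 * Free the PAE, PML4 and PML5 special root tables.  The PAE root is marked
 * encrypted again first, as it is decrypted at creation when shadow paging
 * is in use.
 */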
static void free_mmu_pages(struct kvm_mmu *mmu)
{
	if (!tdp_enabled && mmu->pae_root)
		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
	free_page((unsigned long)mmu->pae_root);
	free_page((unsigned long)mmu->pml4_root);
	free_page((unsigned long)mmu->pml5_root);
}

static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	struct page *page;
	int i;

	mmu->root.hpa = INVALID_PAGE;
	mmu->root.pgd = 0;
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;

	/* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
	if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
		return 0;

	/*
	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
	 * while the PDP table is a per-vCPU construct that's allocated at MMU
	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
	 * x86_64.  Therefore we need to allocate the PDP table in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
	 * generally doesn't use PAE paging and can skip allocating the PDP
	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
	 * KVM; that horror is handled on-demand by mmu_alloc_special_roots().
	 */
	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
		return 0;

	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	mmu->pae_root = page_address(page);

	/*
	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
	 * that KVM's writes and the CPU's reads get along.  Note, this is
	 * only necessary when using shadow paging, as 64-bit NPT can get at
	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
	 */
	if (!tdp_enabled)
		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
	else
		WARN_ON_ONCE(shadow_me_mask);

	for (i = 0; i < 4; ++i)
		mmu->pae_root[i] = INVALID_PAE_ROOT;

	return 0;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
	if (ret)
		return ret;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
	if (ret)
		goto fail_allocate_root;

	return ret;
 fail_allocate_root:
	free_mmu_pages(&vcpu->arch.guest_mmu);
	return ret;
}

#define BATCH_ZAP_PAGES	10
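/*
 * Zap all shadow pages whose mmu_valid_gen no longer matches the VM's,
 * yielding every BATCH_ZAP_PAGES zapped pages so that mmu_lock is not held
 * for too long.
 */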
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	int nr_zapped, batch = 0;

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		/*
		 * No obsolete valid page exists before a newly created page
		 * since active_mmu_pages is a FIFO list.
		 */
		if (!is_obsolete_sp(kvm, sp))
			break;

		/*
		 * Invalid pages should never land back on the list of active
		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
		 * infinite loop if the page gets put back on the list (again).
		 */
		if (WARN_ON(sp->role.invalid))
			continue;

		/*
		 * No need to flush the TLB since we're only zapping shadow
		 * pages with an obsolete generation number and all vCPUS have
		 * loaded a new root, i.e. the shadow pages being zapped cannot
		 * be in active use by the guest.
		 */
		if (batch >= BATCH_ZAP_PAGES &&
		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
			batch = 0;
			goto restart;
		}

		if (__kvm_mmu_prepare_zap_page(kvm, sp,
				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
			batch += nr_zapped;
			goto restart;
		}
	}

	/*
	 * Kick all vCPUs (via remote TLB flush) before freeing the page tables
	 * to ensure KVM is not in the middle of a lockless shadow page table
	 * walk, which may reference the pages.  The remote TLB flush itself is
	 * not required and is simply a convenient way to kick vCPUs as needed.
	 * KVM performs a local TLB flush when allocating a new root (see
	 * kvm_mmu_load()), and the reload in the caller ensures no vCPUs are
	 * running with an obsolete MMU.
	 */
	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
}

/*
 * Fast invalidate all shadow pages and use lock-break technique
 * to zap obsolete pages.
 *
 * It's required when memslot is being deleted or VM is being
 * destroyed, in these cases, we should ensure that KVM MMU does
 * not use any resource of the being-deleted slot or all slots
 * after calling the function.
 */
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	write_lock(&kvm->mmu_lock);
	trace_kvm_mmu_zap_all_fast(kvm);

	/*
	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
	 * held for the entire duration of zapping obsolete pages, it's
	 * impossible for there to be multiple invalid generations associated
	 * with *valid* shadow pages at any given time, i.e. there is exactly
	 * one valid generation and (at most) one invalid generation.
	 */
	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;

	/*
	 * In order to ensure all vCPUs drop their soon-to-be invalid roots,
	 * invalidating TDP MMU roots must be done while holding mmu_lock for
	 * write and in the same critical section as making the reload request,
	 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
5787 5788 5789 5790
	 */
	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_invalidate_all_roots(kvm);

	/*
	 * Notify all vcpus to reload their shadow page tables and flush TLBs.
	 * Then all vcpus will switch to the new shadow page table with the new
	 * mmu_valid_gen.
	 *
	 * Note: we need to do this under the protection of mmu_lock,
	 * otherwise, a vcpu could purge its shadow pages but miss the tlb flush.
	 */
	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS);

	kvm_zap_obsolete_pages(kvm);

	write_unlock(&kvm->mmu_lock);

	/*
	 * Zap the invalidated TDP MMU roots, all SPTEs must be dropped before
	 * returning to the caller, e.g. if the zap is in response to a memslot
	 * deletion, mmu_notifier callbacks will be unable to reach the SPTEs
	 * associated with the deleted memslot once the update completes.
	 * Deferring the zap until the final reference to the root is put would
	 * lead to use-after-free.
	 */
	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_zap_invalidated_roots(kvm);
}

static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{
	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
{
	kvm_mmu_zap_all_fast(kvm);
}

int kvm_mmu_init_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
	int r;

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
	INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);

	r = kvm_mmu_init_tdp_mmu(kvm);
	if (r < 0)
		return r;

	node->track_write = kvm_mmu_pte_write;
	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
	kvm_page_track_register_notifier(kvm, node);
	return 0;
}

void kvm_mmu_uninit_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	kvm_page_track_unregister_notifier(kvm, node);

	kvm_mmu_uninit_tdp_mmu(kvm);
}

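/*
 * Zap rmap-based (shadow MMU) SPTEs covering [gfn_start, gfn_end) in every
 * memslot of every address space; returns true if a TLB flush is needed.
 */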
static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	const struct kvm_memory_slot *memslot;
	struct kvm_memslots *slots;
	struct kvm_memslot_iter iter;
	bool flush = false;
	gfn_t start, end;
	int i;

	if (!kvm_memslots_have_rmaps(kvm))
		return flush;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);

		kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
			memslot = iter.slot;
			start = max(gfn_start, memslot->base_gfn);
			end = min(gfn_end, memslot->base_gfn + memslot->npages);
			if (WARN_ON_ONCE(start >= end))
				continue;

			flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
							PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
							start, end - 1, true, flush);
		}
	}

	return flush;
}

/*
 * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end
 * (not including it)
 */
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	bool flush;
	int i;

	if (WARN_ON_ONCE(gfn_end <= gfn_start))
		return;

	write_lock(&kvm->mmu_lock);

	kvm_inc_notifier_count(kvm, gfn_start, gfn_end);

	flush = __kvm_zap_rmaps(kvm, gfn_start, gfn_end);

	if (is_tdp_mmu_enabled(kvm)) {
		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
			flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
						      gfn_end, true, flush);
	}

	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
						   gfn_end - gfn_start);

	kvm_dec_notifier_count(kvm, gfn_start, gfn_end);

	write_unlock(&kvm->mmu_lock);
}

static bool slot_rmap_write_protect(struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head,
				    const struct kvm_memory_slot *slot)
{
	return rmap_write_protect(rmap_head, false);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      const struct kvm_memory_slot *memslot,
				      int start_level)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
					  start_level, KVM_MAX_HUGEPAGE_LEVEL,
					  false);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
		read_unlock(&kvm->mmu_lock);
	}

	/*
	 * Flush TLBs if any SPTEs had to be write-protected to ensure that
	 * guest writes are reflected in the dirty bitmap before the memslot
	 * update completes, i.e. before enabling dirty logging is visible to
	 * userspace.
	 *
	 * Perform the TLB flush outside the mmu_lock to reduce the amount of
	 * time the lock is held. However, this does mean that another CPU can
	 * now grab mmu_lock and encounter a write-protected SPTE while CPUs
	 * still have a writable mapping for the associated GFN in their TLB.
	 *
	 * This is safe but requires KVM to be careful when making decisions
	 * based on the write-protection status of an SPTE. Specifically, KVM
	 * also write-protects SPTEs to monitor changes to guest page tables
	 * during shadow paging, and must guarantee no CPUs can write to those
	 * pages before the lock is dropped. As mentioned in the previous
	 * paragraph, a write-protected SPTE is no guarantee that a CPU cannot
	 * perform writes. So to determine if a TLB flush is truly required, KVM
	 * will clear a separate software-only bit (MMU-writable) and skip the
	 * flush if-and-only-if this bit was already clear.
	 *
	 * See is_writable_pte() for more details.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}

/* Must be called with the mmu_lock held in write-mode. */
void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot,
				   u64 start, u64 end,
				   int target_level)
{
	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end,
						 target_level, false);

	/*
	 * A TLB flush is unnecessary at this point for the same reasons as in
	 * kvm_mmu_slot_try_split_huge_pages().
	 */
}

void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
					const struct kvm_memory_slot *memslot,
					int target_level)
{
	u64 start = memslot->base_gfn;
	u64 end = start + memslot->npages;

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		kvm_tdp_mmu_try_split_huge_pages(kvm, memslot, start, end, target_level, true);
		read_unlock(&kvm->mmu_lock);
	}

	/*
	 * No TLB flush is necessary here. KVM will flush TLBs after
	 * write-protecting and/or clearing dirty on the newly split SPTEs to
	 * ensure that guest writes are reflected in the dirty log before the
	 * ioctl to enable dirty logging on this memslot completes. Since the
	 * split SPTEs retain the write and dirty bits of the huge SPTE, it is
	 * safe for KVM to decide if a TLB flush is necessary based on the split
	 * SPTEs.
	 */
}

static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head,
					 const struct kvm_memory_slot *slot)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_tlb_flush = 0;
	kvm_pfn_t pfn;
	struct kvm_mmu_page *sp;

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		sp = sptep_to_sp(sptep);
		pfn = spte_to_pfn(*sptep);

		/*
		 * We cannot do huge page mapping for indirect shadow pages,
		 * which are found on the last rmap (level = 1) when not using
		 * tdp; such shadow pages are synced with the page table in
		 * the guest, and the guest page table is using 4K page size
		 * mapping if the indirect sp has level = 1.
		 */
		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
							       pfn, PG_LEVEL_NUM)) {
			pte_list_remove(kvm, rmap_head, sptep);

			if (kvm_available_flush_tlb_with_range())
				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
					KVM_PAGES_PER_HPAGE(sp->role.level));
			else
				need_tlb_flush = 1;

			goto restart;
		}
	}

	return need_tlb_flush;
}

void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *slot)
{
	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		/*
		 * Zap only 4k SPTEs since the legacy MMU only supports dirty
		 * logging at a 4k granularity and never creates collapsible
		 * 2m SPTEs during dirty logging.
		 */
		if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
		read_unlock(&kvm->mmu_lock);
	}
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	/*
	 * All current use cases for flushing the TLBs for a specific memslot
	 * are related to dirty logging, and many do the TLB flush out of
	 * mmu_lock. The interaction between the various operations on memslot
	 * must be serialized by slots_lock to ensure the TLB flush from one
	 * operation is observed by any other operation on the same memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
					   memslot->npages);
}

void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		/*
		 * Clear dirty bits only on 4k SPTEs since the legacy MMU only
		 * supports dirty logging at a 4k granularity.
		 */
		flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
		read_unlock(&kvm->mmu_lock);
	}

	/*
	 * It's also safe to flush TLBs out of mmu lock here as currently this
	 * function is only used for dirty logging, in which case flushing TLB
	 * out of mmu lock also guarantees no dirty pages will be lost in
	 * dirty_bitmap.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int ign;

	write_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		if (WARN_ON(sp->role.invalid))
			continue;
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
			goto restart;
		if (cond_resched_rwlock_write(&kvm->mmu_lock))
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_zap_all(kvm);

	write_unlock(&kvm->mmu_lock);
}

void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{
	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);

	gen &= MMIO_SPTE_GEN_MASK;

	/*
	 * Generation numbers are incremented in multiples of the number of
	 * address spaces in order to provide unique generations across all
	 * address spaces.  Strip what is effectively the address space
	 * modifier prior to checking for a wrap of the MMIO generation so
	 * that a wrap in any address space is detected.
	 */
	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);

	/*
	 * The very rare case: if the MMIO generation number has wrapped,
	 * zap all shadow pages.
	 */
	if (unlikely(gen == 0)) {
		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
		kvm_mmu_zap_all_fast(kvm);
	}
}

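/*
 * Shrinker callback: reclaim MMU pages from a single VM per invocation.  A VM
 * with pending zapped obsolete pages only has those committed; otherwise its
 * oldest shadow pages are zapped.  The VM is then rotated to the tail of
 * vm_list so successive calls spread the load.
 */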
static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	int nr_to_scan = sc->nr_to_scan;
	unsigned long freed = 0;

	mutex_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		/*
		 * Never scan more than sc->nr_to_scan VM instances.
		 * Will not hit this condition practically since we do not try
		 * to shrink more than one VM and it is very unlikely to see
		 * !n_used_mmu_pages so many times.
		 */
		if (!nr_to_scan--)
			break;
		/*
		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
		 * here. We may skip a VM instance erroneously, but we do not
		 * want to shrink a VM that only started to populate its MMU
		 * anyway.
		 */
		if (!kvm->arch.n_used_mmu_pages &&
		    !kvm_has_zapped_obsolete_pages(kvm))
			continue;

		idx = srcu_read_lock(&kvm->srcu);
		write_lock(&kvm->mmu_lock);

		if (kvm_has_zapped_obsolete_pages(kvm)) {
			kvm_mmu_commit_zap_page(kvm,
			      &kvm->arch.zapped_obsolete_pages);
			goto unlock;
		}

		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);

unlock:
		write_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		/*
		 * unfair on small ones
		 * per-vm shrinkers cry out
		 * sadness comes quickly
		 */
		list_move_tail(&kvm->vm_list, &vm_list);
		break;
	}

	mutex_unlock(&kvm_lock);
	return freed;
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
	.count_objects = mmu_shrink_count,
	.scan_objects = mmu_shrink_scan,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
	kmem_cache_destroy(pte_list_desc_cache);
	kmem_cache_destroy(mmu_page_header_cache);
}

static bool get_nx_auto_mode(void)
{
	/* Return true when CPU has the bug, and mitigations are ON */
	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
}

static void __set_nx_huge_pages(bool val)
{
	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
}

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
{
	bool old_val = nx_huge_pages;
	bool new_val;

	/* In "auto" mode deploy workaround only if CPU has the bug. */
	if (sysfs_streq(val, "off"))
		new_val = 0;
	else if (sysfs_streq(val, "force"))
		new_val = 1;
	else if (sysfs_streq(val, "auto"))
		new_val = get_nx_auto_mode();
	else if (strtobool(val, &new_val) < 0)
		return -EINVAL;

	__set_nx_huge_pages(new_val);

	if (new_val != old_val) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list) {
			mutex_lock(&kvm->slots_lock);
			kvm_mmu_zap_all_fast(kvm);
			mutex_unlock(&kvm->slots_lock);

			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
		}
		mutex_unlock(&kvm_lock);
	}

	return 0;
}

/*
 * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
 * its default value of -1 is technically undefined behavior for a boolean.
 */
void kvm_mmu_x86_module_init(void)
{
	if (nx_huge_pages == -1)
		__set_nx_huge_pages(get_nx_auto_mode());
}

/*
 * The bulk of the MMU initialization is deferred until the vendor module is
 * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
 * to be reset when a potentially different vendor module is loaded.
 */
int kvm_mmu_vendor_module_init(void)
{
	int ret = -ENOMEM;

	/*
	 * MMU roles use union aliasing which is, generally speaking, an
	 * undefined behavior. However, we supposedly know how compilers behave
	 * and the current status quo is unlikely to change. Guardians below are
	 * supposed to let us know if the assumption becomes false.
	 */
	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));

	kvm_mmu_reset_all_pte_masks();

	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
					    sizeof(struct pte_list_desc),
					    0, SLAB_ACCOUNT, NULL);
	if (!pte_list_desc_cache)
		goto out;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, SLAB_ACCOUNT, NULL);
	if (!mmu_page_header_cache)
		goto out;

	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
		goto out;

	ret = register_shrinker(&mmu_shrinker);
	if (ret)
		goto out;

	return 0;

out:
	mmu_destroy_caches();
	return ret;
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	free_mmu_pages(&vcpu->arch.root_mmu);
	free_mmu_pages(&vcpu->arch.guest_mmu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_vendor_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
}

/*
 * Calculate the effective recovery period, accounting for '0' meaning "let KVM
 * select a halving time of 1 hour".  Returns true if recovery is enabled.
 */
static bool calc_nx_huge_pages_recovery_period(uint *period)
{
	/*
	 * Use READ_ONCE to get the params, this may be called outside of the
	 * param setters, e.g. by the kthread to compute its next timeout.
	 */
	bool enabled = READ_ONCE(nx_huge_pages);
	uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);

	if (!enabled || !ratio)
		return false;

	*period = READ_ONCE(nx_huge_pages_recovery_period_ms);
	if (!*period) {
		/* Make sure the period is not less than one second.  */
		ratio = min(ratio, 3600u);
		*period = 60 * 60 * 1000 / ratio;
	}
	return true;
}

static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
{
	bool was_recovery_enabled, is_recovery_enabled;
	uint old_period, new_period;
	int err;

	was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);

	err = param_set_uint(val, kp);
	if (err)
		return err;

	is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);

	if (is_recovery_enabled &&
	    (!was_recovery_enabled || old_period > new_period)) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list)
			wake_up_process(kvm->arch.nx_lpage_recovery_thread);

		mutex_unlock(&kvm_lock);
	}

	return err;
}

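/*
 * Reclaim shadow pages that were forced to 4KiB by the NX huge page
 * mitigation: zap a ratio-controlled portion of the lpage_disallowed pages so
 * that they can be rebuilt as huge pages on subsequent faults.
 */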
static void kvm_recover_nx_lpages(struct kvm *kvm)
{
	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
	int rcu_idx;
	struct kvm_mmu_page *sp;
	unsigned int ratio;
	LIST_HEAD(invalid_list);
	bool flush = false;
	ulong to_zap;

	rcu_idx = srcu_read_lock(&kvm->srcu);
	write_lock(&kvm->mmu_lock);

	/*
	 * Zapping TDP MMU shadow pages, including the remote TLB flush, must
	 * be done under RCU protection, because the pages are freed via RCU
	 * callback.
	 */
	rcu_read_lock();

	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
	for ( ; to_zap; --to_zap) {
		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
			break;

		/*
		 * We use a separate list instead of just using active_mmu_pages
		 * because the number of lpage_disallowed pages is expected to
		 * be relatively small compared to the total.
		 */
		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
				      struct kvm_mmu_page,
				      lpage_disallowed_link);
		WARN_ON_ONCE(!sp->lpage_disallowed);
		if (is_tdp_mmu_page(sp)) {
			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
		} else {
			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
			WARN_ON_ONCE(sp->lpage_disallowed);
		}

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
			rcu_read_unlock();

			cond_resched_rwlock_write(&kvm->mmu_lock);
			flush = false;

			rcu_read_lock();
		}
	}
	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);

	rcu_read_unlock();

	write_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, rcu_idx);
}

static long get_nx_lpage_recovery_timeout(u64 start_time)
{
	bool enabled;
	uint period;

	enabled = calc_nx_huge_pages_recovery_period(&period);

	return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
		       : MAX_SCHEDULE_TIMEOUT;
}

static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
{
	u64 start_time;
	long remaining_time;

	while (true) {
		start_time = get_jiffies_64();
		remaining_time = get_nx_lpage_recovery_timeout(start_time);

		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop() && remaining_time > 0) {
			schedule_timeout(remaining_time);
			remaining_time = get_nx_lpage_recovery_timeout(start_time);
			set_current_state(TASK_INTERRUPTIBLE);
		}

		set_current_state(TASK_RUNNING);

		if (kthread_should_stop())
			return 0;

		kvm_recover_nx_lpages(kvm);
	}
}

int kvm_mmu_post_init_vm(struct kvm *kvm)
{
	int err;

	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
					  "kvm-nx-lpage-recovery",
					  &kvm->arch.nx_lpage_recovery_thread);
	if (!err)
		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);

	return err;
}

void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.nx_lpage_recovery_thread)
		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
}