// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "mmu_internal.h"
#include "tdp_mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"
#include "spte.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <linux/kthread.h>

#include <asm/page.h>
#include <asm/memtype.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/set_memory.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
#include "trace.h"

#include "paging.h"

extern bool itlb_multihit_kvm_mitigation;

int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops nx_huge_pages_ops = {
	.set = set_nx_huge_pages,
	.get = param_get_bool,
};

static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
	.set = set_nx_huge_pages_recovery_ratio,
	.get = param_get_uint,
};

module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
		&nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");

static bool __read_mostly force_flush_and_sync_on_reuse;
module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);

/*
 * When set to true, this enables Two-Dimensional Paging, where the hardware
 * walks two page tables:
 * 1. the guest-virtual to guest-physical translation
 * 2. while doing 1., the guest-physical to host-physical translation
 * If the hardware supports that, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

static int max_huge_page_level __read_mostly;
static int tdp_root_level __read_mostly;
static int max_tdp_level __read_mostly;

enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

#ifdef MMU_DEBUG
bool dbg = 0;
module_param(dbg, bool, 0644);
#endif

#define PTE_PREFETCH_NUM		8

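/*
 * The PT32_* constants below describe legacy 32-bit (non-PAE) guest paging:
 * two levels, with 10 index bits per level.
 */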
#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#include <trace/events/kvm.h>

/* make pte_list_desc fit well in cache lines */
#define PTE_LIST_EXT 14

/*
 * Slight optimization of cacheline layout, by putting `more' and `spte_count'
 * at the start; then accessing it will only use one single cacheline for
 * either full (entries==PTE_LIST_EXT) case or entries<=6.
 */
struct pte_list_desc {
	struct pte_list_desc *more;
	/*
	 * Stores number of entries stored in the pte_list_desc.  No need to be
	 * u64 but just for easier alignment.  When PTE_LIST_EXT, means full.
	 */
	u64 spte_count;
	u64 *sptes[PTE_LIST_EXT];
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
	int level;
	unsigned index;
};

#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
					 (_root), (_addr));                \
	     shadow_walk_okay(&(_walker));			           \
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))

static struct kmem_cache *pte_list_desc_cache;
struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);

struct kvm_mmu_role_regs {
	const unsigned long cr0;
	const unsigned long cr4;
	const u64 efer;
};

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

/*
 * Yes, lots of underscores.  They're a hint that you probably shouldn't be
 * reading from the role_regs.  Once the mmu_role is constructed, it becomes
 * the single source of truth for the MMU's state.
 */
#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
static inline bool __maybe_unused ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
{									\
	return !!(regs->reg & flag);					\
}
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);

/*
 * The MMU itself (with a valid role) is the single source of truth for the
 * MMU.  Do not use the regs used to build the MMU/role, nor the vCPU.  The
 * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
 * and the vCPU may be incorrect/irrelevant.
 */
#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)		\
static inline bool __maybe_unused is_##reg##_##name(struct kvm_mmu *mmu)	\
{								\
	return !!(mmu->mmu_role. base_or_ext . reg##_##name);	\
}
BUILD_MMU_ROLE_ACCESSOR(ext,  cr0, pg);
BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pse);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pae);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smep);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, smap);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, pke);
BUILD_MMU_ROLE_ACCESSOR(ext,  cr4, la57);
BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);

static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_role_regs regs = {
		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
		.efer = vcpu->arch.efer,
	};

	return regs;
}

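/* Derive the guest root page-table level implied by the CR0/CR4/EFER snapshot. */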
static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
{
	if (!____is_cr0_pg(regs))
		return 0;
	else if (____is_efer_lma(regs))
		return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
					       PT64_ROOT_4LEVEL;
	else if (____is_cr4_pae(regs))
		return PT32E_ROOT_LEVEL;
	else
		return PT32_ROOT_LEVEL;
}

static inline bool kvm_available_flush_tlb_with_range(void)
{
	return kvm_x86_ops.tlb_remote_flush_with_range;
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

	if (range && kvm_x86_ops.tlb_remote_flush_with_range)
		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range);

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;

	range.start_gfn = start_gfn;
	range.pages = pages;

	kvm_flush_remote_tlbs_with_range(kvm, &range);
}

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned int access)
{
	u64 spte = make_mmio_spte(vcpu, gfn, access);

	trace_mark_mmio_spte(sptep, gfn, spte);
	mmu_spte_set(sptep, spte);
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
	       & shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
}

static unsigned get_mmio_spte_access(u64 spte)
{
	return spte & shadow_mmio_access_mask;
}

static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
	u64 kvm_gen, spte_gen, gen;

	gen = kvm_vcpu_memslots(vcpu)->generation;
	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return false;

	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
}

static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                                  struct x86_exception *exception)
{
        return gpa;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

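/* PSE-36: recover the GFN bits above bit 31 that a 4MB guest PTE encodes in its middle bits. */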
static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
	return READ_ONCE(*sptep);
}
#else
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, we should store
	 * the high bits first, then set the present bit, so the CPU cannot
	 * fetch this spte while we are setting it.
	 */
	smp_wmb();

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * the present bit first to avoid a vcpu fetching the old high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * The idea of using this lightweight way to get the spte on x86_32 guests
 * comes from gup_get_pte (mm/gup.c).
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running out of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp =  sptep_to_sp(sptep);
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif

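/*
 * A "volatile" SPTE has bits that hardware or a lockless walker may change
 * underneath us (accessed/dirty, or a writable bit that can be set without
 * the MMU lock), so such SPTEs must be updated atomically.
 */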
static bool spte_has_volatile_bits(u64 spte)
{
	if (!is_shadow_present_pte(spte))
		return false;

	/*
	 * Always atomically update the spte if it can be updated
	 * out of mmu-lock: this ensures the dirty bit is not lost,
	 * and also gives us a stable is_writable_pte() so that a
	 * needed tlb flush is not missed.
	 */
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if ((spte & shadow_accessed_mask) == 0 ||
		    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}

	return false;
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
 */
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	WARN_ON(!is_shadow_present_pte(new_spte));

	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
		return old_spte;
	}

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

	return old_spte;
}

/* Rules for using mmu_spte_update:
 * Update the state bits; the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might be cached on a CPU's TLB; the return value indicates this
 * case.
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	bool flush = false;
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	if (!is_shadow_present_pte(old_spte))
		return false;

	/*
	 * Updating the spte out of mmu-lock is safe, since
	 * we always atomically update it; see the comments in
	 * spte_has_volatile_bits().
	 */
	if (spte_can_locklessly_be_made_writable(old_spte) &&
	      !is_writable_pte(new_spte))
		flush = true;

	/*
	 * Flush TLB when accessed/dirty states are changed in the page tables,
	 * to guarantee consistency between TLB and page tables.
	 */

	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	}

	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
	}

	return flush;
}

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent while tracking the
 * state bits; it is used to clear the last-level sptep.
 * Returns the old PTE.
 */
static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
{
	kvm_pfn_t pfn;
	u64 old_spte = *sptep;
	int level = sptep_to_sp(sptep)->role.level;

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, 0ull);
	else
		old_spte = __update_clear_spte_slow(sptep, 0ull);

	if (!is_shadow_present_pte(old_spte))
		return old_spte;

	kvm_update_page_stats(kvm, level, -1);

	pfn = spte_to_pfn(old_spte);

	/*
	 * KVM does not hold the refcount of the page used by the KVM MMU;
	 * before reclaiming a page, we must first unmap it from the MMU.
	 */
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));

	if (is_accessed_spte(old_spte))
		kvm_set_pfn_accessed(pfn);

	if (is_dirty_spte(old_spte))
		kvm_set_pfn_dirty(pfn);

	return old_spte;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear the spte without caring about its state bits;
 * it is used for upper-level sptes.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
	__update_clear_spte_fast(sptep, 0ull);
}

static u64 mmu_spte_get_lockless(u64 *sptep)
{
	return __get_spte_lockless(sptep);
}

/* Restore an acc-track PTE back to a regular PTE */
static u64 restore_acc_track_spte(u64 spte)
{
	u64 new_spte = spte;
	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;

	WARN_ON_ONCE(spte_ad_enabled(spte));
	WARN_ON_ONCE(!is_access_track_spte(spte));

	new_spte &= ~shadow_acc_track_mask;
	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
	new_spte |= saved_bits;

	return new_spte;
}

/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
	u64 spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(spte))
		return false;

	if (spte_ad_enabled(spte)) {
		clear_bit((ffs(shadow_accessed_mask) - 1),
			  (unsigned long *)sptep);
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(spte))
			kvm_set_pfn_dirty(spte_to_pfn(spte));

		spte = mark_spte_for_access_track(spte);
		mmu_spte_update_no_track(sptep, spte);
	}

	return true;
}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
	if (is_tdp_mmu(vcpu->arch.mmu)) {
		kvm_tdp_mmu_walk_lockless_begin();
	} else {
		/*
		 * Prevent page table teardown by making any free-er wait during
		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
		 */
		local_irq_disable();

		/*
		 * Make sure a following spte read is not reordered ahead of the write
		 * to vcpu->mode.
		 */
		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
	}
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	if (is_tdp_mmu(vcpu->arch.mmu)) {
		kvm_tdp_mmu_walk_lockless_end();
	} else {
		/*
		 * Make sure the write to vcpu->mode is not reordered in front of
		 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
		 */
		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
		local_irq_enable();
	}
}

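/*
 * Pre-allocate the per-vCPU object caches that page-fault handling will
 * consume, so that later allocations never have to happen under the MMU lock.
 */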
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
{
	int r;

	/* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
	if (r)
		return r;
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
				       PT64_ROOT_MAX_LEVEL);
	if (r)
		return r;
	if (maybe_indirect) {
		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
					       PT64_ROOT_MAX_LEVEL);
		if (r)
			return r;
	}
	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
					  PT64_ROOT_MAX_LEVEL);
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
	return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}

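/*
 * For direct shadow pages the mapped GFN can be computed from sp->gfn and
 * the SPTE index; indirect pages cache the guest GFN in sp->gfns[].
 */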
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (!sp->role.direct) {
		sp->gfns[index] = gfn;
		return;
	}

	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
		pr_err_ratelimited("gfn mismatch under direct page %llx "
				   "(expected %llx, got %llx)\n",
				   sp->gfn,
				   kvm_mmu_page_get_gfn(sp, index), gfn);
}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
		const struct kvm_memory_slot *slot, int level)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.lpage_info[level - 2][idx];
}

static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
					    gfn_t gfn, int count)
{
	struct kvm_lpage_info *linfo;
	int i;

	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->disallow_lpage += count;
		WARN_ON(linfo->disallow_lpage < 0);
	}
}

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, 1);
}

void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, -1);
}

static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages++;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);

	/* the non-leaf shadow pages are kept read-only. */
	if (sp->role.level > PG_LEVEL_4K)
		return kvm_slot_page_track_add_page(kvm, slot, gfn,
						    KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_disallow_lpage(slot, gfn);
}

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	if (sp->lpage_disallowed)
		return;

	++kvm->stat.nx_lpage_splits;
	list_add_tail(&sp->lpage_disallowed_link,
		      &kvm->arch.lpage_disallowed_mmu_pages);
	sp->lpage_disallowed = true;
}

static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages--;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (sp->role.level > PG_LEVEL_4K)
		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
						       KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_allow_lpage(slot, gfn);
}

void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	--kvm->stat.nx_lpage_splits;
	sp->lpage_disallowed = false;
	list_del(&sp->lpage_disallowed_link);
}

static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return NULL;
	if (no_dirty_log && kvm_slot_dirty_track_enabled(slot))
		return NULL;

	return slot;
}

/*
 * About rmap_head encoding:
 *
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
 * pte_list_desc containing more mappings.
 */

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
 */
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
			struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	int count = 0;

	if (!rmap_head->val) {
		rmap_printk("%p %llx 0->1\n", spte, *spte);
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("%p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_pte_list_desc(vcpu);
		desc->sptes[0] = (u64 *)rmap_head->val;
		desc->sptes[1] = spte;
		desc->spte_count = 2;
		rmap_head->val = (unsigned long)desc | 1;
		++count;
	} else {
		rmap_printk("%p %llx many->many\n", spte, *spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		while (desc->spte_count == PTE_LIST_EXT) {
			count += PTE_LIST_EXT;
			if (!desc->more) {
				desc->more = mmu_alloc_pte_list_desc(vcpu);
				desc = desc->more;
				desc->spte_count = 0;
				break;
			}
			desc = desc->more;
		}
		count += desc->spte_count;
		desc->sptes[desc->spte_count++] = spte;
	}
	return count;
}

static void
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
			   struct pte_list_desc *desc, int i,
			   struct pte_list_desc *prev_desc)
{
	int j = desc->spte_count - 1;

	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	desc->spte_count--;
	if (desc->spte_count)
		return;
	if (!prev_desc && !desc->more)
		rmap_head->val = 0;
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			rmap_head->val = (unsigned long)desc->more | 1;
	mmu_free_pte_list_desc(desc);
}

static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;
	int i;

	if (!rmap_head->val) {
		pr_err("%s: %p 0->BUG\n", __func__, spte);
		BUG();
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("%p 1->0\n", spte);
		if ((u64 *)rmap_head->val != spte) {
			pr_err("%s:  %p 1->BUG\n", __func__, spte);
			BUG();
		}
		rmap_head->val = 0;
	} else {
		rmap_printk("%p many->many\n", spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < desc->spte_count; ++i) {
				if (desc->sptes[i] == spte) {
					pte_list_desc_remove_entry(rmap_head,
							desc, i, prev_desc);
					return;
				}
			}
			prev_desc = desc;
			desc = desc->more;
		}
		pr_err("%s: %p many->many\n", __func__, spte);
		BUG();
	}
}

static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			    u64 *sptep)
1004
{
1005
	mmu_spte_clear_track_bits(kvm, sptep);
1006 1007 1008
	__pte_list_remove(sptep, rmap_head);
}

P
Peter Xu 已提交
1009
/* Return true if rmap existed, false otherwise */
1010
static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
P
Peter Xu 已提交
1011 1012 1013 1014 1015 1016 1017 1018
{
	struct pte_list_desc *desc, *next;
	int i;

	if (!rmap_head->val)
		return false;

	if (!(rmap_head->val & 1)) {
1019
		mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
P
Peter Xu 已提交
1020 1021 1022 1023 1024 1025 1026
		goto out;
	}

	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);

	for (; desc; desc = next) {
		for (i = 0; i < desc->spte_count; i++)
1027
			mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
P
Peter Xu 已提交
1028 1029 1030 1031 1032 1033 1034 1035 1036
		next = desc->more;
		mmu_free_pte_list_desc(desc);
	}
out:
	/* rmap_head is meaningless now, remember to reset it */
	rmap_head->val = 0;
	return true;
}

1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	unsigned int count = 0;

	if (!rmap_head->val)
		return 0;
	else if (!(rmap_head->val & 1))
		return 1;

	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);

	while (desc) {
		count += desc->spte_count;
		desc = desc->more;
	}

	return count;
}

1057 1058
static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
					 const struct kvm_memory_slot *slot)
1059
{
1060
	unsigned long idx;
1061

1062
	idx = gfn_to_index(gfn, slot->base_gfn, level);
1063
	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
1064 1065
}

1066 1067
static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
1068
	struct kvm_mmu_memory_cache *mc;
1069

1070
	mc = &vcpu->arch.mmu_pte_list_desc_cache;
1071
	return kvm_mmu_memory_cache_nr_free_objects(mc);
1072 1073
}

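/*
 * Unlink @spte from the rmap chain of the GFN it maps.  The SPTE itself is
 * not cleared here; callers do that separately.
 */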
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	struct kvm_rmap_head *rmap_head;

	sp = sptep_to_sp(spte);
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);

	/*
	 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
	 * so we have to determine which memslots to use based on context
	 * information in sp->role.
	 */
	slots = kvm_memslots_for_spte_role(kvm, sp->role);

	slot = __gfn_to_memslot(slots, gfn);
	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);

	__pte_list_remove(spte, rmap_head);
}

/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap.  All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
	/* private fields */
	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
	int pos;			/* index of the sptep */
};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
 * information in the iterator may not be valid.
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
			   struct rmap_iterator *iter)
{
	u64 *sptep;

	if (!rmap_head->val)
		return NULL;

	if (!(rmap_head->val & 1)) {
		iter->desc = NULL;
		sptep = (u64 *)rmap_head->val;
		goto out;
	}

	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	iter->pos = 0;
	sptep = iter->desc->sptes[iter->pos];
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
	u64 *sptep;

	if (iter->desc) {
		if (iter->pos < PTE_LIST_EXT - 1) {
			++iter->pos;
			sptep = iter->desc->sptes[iter->pos];
			if (sptep)
				goto out;
		}

		iter->desc = iter->desc->more;

		if (iter->desc) {
			iter->pos = 0;
			/* desc->sptes[0] cannot be NULL */
			sptep = iter->desc->sptes[iter->pos];
			goto out;
		}
	}

	return NULL;
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
}

#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
	     _spte_; _spte_ = rmap_get_next(_iter_))

static void drop_spte(struct kvm *kvm, u64 *sptep)
{
	u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);

	if (is_shadow_present_pte(old_spte))
		rmap_remove(kvm, sptep);
}


static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
	if (is_large_pte(*sptep)) {
		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
		drop_spte(kvm, sptep);
		return true;
	}

	return false;
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	if (__drop_large_spte(vcpu->kvm, sptep)) {
		struct kvm_mmu_page *sp = sptep_to_sp(sptep);

		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
	}
}

/*
 * Write-protect on the specified @sptep, @pt_protect indicates whether
 * spte write-protection is caused by protecting shadow page table.
 *
 * Note: write protection is different between dirty logging and spte
 * protection:
 * - for dirty logging, the spte can be set to writable at anytime if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be writable only after unsync-ing
 *   shadow page.
 *
 * Return true if the tlb needs to be flushed.
 */
static bool spte_write_protect(u64 *sptep, bool pt_protect)
{
	u64 spte = *sptep;

	if (!is_writable_pte(spte) &&
	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
		return false;

	rmap_printk("spte %p %llx\n", sptep, *sptep);

	if (pt_protect)
		spte &= ~shadow_mmu_writable_mask;
	spte = spte & ~PT_WRITABLE_MASK;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_write_protect(struct kvm *kvm,
				 struct kvm_rmap_head *rmap_head,
				 bool pt_protect)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		flush |= spte_write_protect(sptep, pt_protect);

	return flush;
}

static bool spte_clear_dirty(u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("spte %p %llx\n", sptep, *sptep);

	MMU_WARN_ON(!spte_ad_enabled(spte));
	spte &= ~shadow_dirty_mask;
	return mmu_spte_update(sptep, spte);
}

static bool spte_wrprot_for_clear_dirty(u64 *sptep)
{
	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
					       (unsigned long *)sptep);
	if (was_writable && !spte_ad_enabled(*sptep))
		kvm_set_pfn_dirty(spte_to_pfn(*sptep));

	return was_writable;
}

/*
 * Gets the GFN ready for another round of dirty logging by clearing the
 *	- D bit on ad-enabled SPTEs, and
 *	- W bit on ad-disabled SPTEs.
 * Returns true iff any D or W bits were cleared.
 */
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			       const struct kvm_memory_slot *slot)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (spte_ad_need_write_protect(*sptep))
			flush |= spte_wrprot_for_clear_dirty(sptep);
		else
			flush |= spte_clear_dirty(sptep);

	return flush;
}

/**
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
				slot->base_gfn + gfn_offset, mask, true);

	if (!kvm_memslots_have_rmaps(kvm))
		return;

	while (mask) {
		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					PG_LEVEL_4K, slot);
		__rmap_write_protect(kvm, rmap_head, false);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}

/**
1324 1325
 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
 * protect the page if the D-bit isn't supported.
1326 1327 1328 1329 1330 1331 1332
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should clear D-bit
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
1333 1334 1335
static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
					 struct kvm_memory_slot *slot,
					 gfn_t gfn_offset, unsigned long mask)
1336
{
1337
	struct kvm_rmap_head *rmap_head;
1338

1339
	if (is_tdp_mmu_enabled(kvm))
1340 1341
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
				slot->base_gfn + gfn_offset, mask, false);
1342 1343 1344 1345

	if (!kvm_memslots_have_rmaps(kvm))
		return;

1346
	while (mask) {
1347 1348
		rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					PG_LEVEL_4K, slot);
1349
		__rmap_clear_dirty(kvm, rmap_head, slot);
1350 1351 1352 1353 1354 1355

		/* clear the first set bit */
		mask &= mask - 1;
	}
}

1356 1357 1358 1359 1360 1361 1362
/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
1363 1364
 * We need to care about huge page mappings: e.g. during dirty logging we may
 * have such mappings.
1365 1366 1367 1368 1369
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392
	/*
	 * Huge pages are NOT write protected when we start dirty logging in
	 * initially-all-set mode; must write protect them here so that they
	 * are split to 4K on the first write.
	 *
	 * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
	 * of memslot has no such restriction, so the range can cross two large
	 * pages.
	 */
	if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
		gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
		gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);

		kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);

		/* Cross two large pages? */
		if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
		    ALIGN(end << PAGE_SHIFT, PMD_SIZE))
			kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
						       PG_LEVEL_2M);
	}

	/* Now handle 4K PTEs.  */
1393 1394
	if (kvm_x86_ops.cpu_dirty_log_size)
		kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
1395 1396
	else
		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1397 1398
}

int kvm_cpu_dirty_log_size(void)
{
	return kvm_x86_ops.cpu_dirty_log_size;
}

bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level)
{
	struct kvm_rmap_head *rmap_head;
	int i;
	bool write_protected = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
			rmap_head = gfn_to_rmap(gfn, i, slot);
			write_protected |= __rmap_write_protect(kvm, rmap_head, true);
		}
	}

	if (is_tdp_mmu_enabled(kvm))
		write_protected |=
			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);

	return write_protected;
}

static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}

static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			  const struct kvm_memory_slot *slot)
{
	return pte_list_destroy(kvm, rmap_head);
}

static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			    struct kvm_memory_slot *slot, gfn_t gfn, int level,
			    pte_t unused)
{
	return kvm_zap_rmapp(kvm, rmap_head, slot);
}

static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			      struct kvm_memory_slot *slot, gfn_t gfn, int level,
			      pte_t pte)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_flush = 0;
	u64 new_spte;
	kvm_pfn_t new_pfn;

	WARN_ON(pte_huge(pte));
	new_pfn = pte_pfn(pte);

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		rmap_printk("spte %p %llx gfn %llx (%d)\n",
			    sptep, *sptep, gfn, level);

		need_flush = 1;

		if (pte_write(pte)) {
			pte_list_remove(kvm, rmap_head, sptep);
			goto restart;
		} else {
			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
					*sptep, new_pfn);

			mmu_spte_clear_track_bits(kvm, sptep);
			mmu_spte_set(sptep, new_spte);
		}
	}

	if (need_flush && kvm_available_flush_tlb_with_range()) {
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
		return 0;
	}

	return need_flush;
}

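/*
 * Iterator over every rmap head covering a GFN range in a memslot, visiting
 * each page-table level from start_level through end_level.
 */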
struct slot_rmap_walk_iterator {
	/* input fields. */
	const struct kvm_memory_slot *slot;
	gfn_t start_gfn;
	gfn_t end_gfn;
	int start_level;
	int end_level;

	/* output fields. */
	gfn_t gfn;
	struct kvm_rmap_head *rmap;
	int level;

	/* private field. */
	struct kvm_rmap_head *end_rmap;
};

static void
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{
	iterator->level = level;
	iterator->gfn = iterator->start_gfn;
	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
}

static void
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
		    const struct kvm_memory_slot *slot, int start_level,
		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
{
	iterator->slot = slot;
	iterator->start_level = start_level;
	iterator->end_level = end_level;
	iterator->start_gfn = start_gfn;
	iterator->end_gfn = end_gfn;

	rmap_walk_init_level(iterator, iterator->start_level);
}

static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{
	return !!iterator->rmap;
}

static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
	if (++iterator->rmap <= iterator->end_rmap) {
		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
		return;
	}

	if (++iterator->level > iterator->end_level) {
		iterator->rmap = NULL;
		return;
	}

	rmap_walk_init_level(iterator, iterator->level);
}

#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
	   _start_gfn, _end_gfn, _iter_)				\
	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
				 _end_level_, _start_gfn, _end_gfn);	\
	     slot_rmap_walk_okay(_iter_);				\
	     slot_rmap_walk_next(_iter_))

typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			       struct kvm_memory_slot *slot, gfn_t gfn,
			       int level, pte_t pte);

static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
						 struct kvm_gfn_range *range,
						 rmap_handler_t handler)
{
	struct slot_rmap_walk_iterator iterator;
	bool ret = false;

	for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
				 range->start, range->end - 1, &iterator)
		ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
			       iterator.level, range->pte);

	return ret;
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm))
		flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);

	if (is_tdp_mmu_enabled(kvm))
		flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);

	return flush;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm))
		flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);

	if (is_tdp_mmu_enabled(kvm))
		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);

	return flush;
}

static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			  struct kvm_memory_slot *slot, gfn_t gfn, int level,
			  pte_t unused)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int young = 0;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		young |= mmu_spte_age(sptep);

	return young;
}

static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			       struct kvm_memory_slot *slot, gfn_t gfn,
			       int level, pte_t unused)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (is_accessed_spte(*sptep))
			return 1;
	return 0;
}

#define RMAP_RECYCLE_THRESHOLD 1000

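/*
 * Track a new SPTE in the rmap for @gfn.  If the chain has grown past
 * RMAP_RECYCLE_THRESHOLD, zap the existing mappings and flush so that rmap
 * walks stay bounded.
 */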
static void rmap_add(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
		     u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_head *rmap_head;
	int rmap_count;

	sp = sptep_to_sp(spte);
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
	rmap_count = pte_list_add(vcpu, spte, rmap_head);

	if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
		kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
		kvm_flush_remote_tlbs_with_address(
				vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
	}
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool young = false;

	if (kvm_memslots_have_rmaps(kvm))
		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);

	if (is_tdp_mmu_enabled(kvm))
		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);

	return young;
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool young = false;

	if (kvm_memslots_have_rmaps(kvm))
		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);

	if (is_tdp_mmu_enabled(kvm))
		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);

	return young;
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

/*
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
	hlist_del(&sp->hash_link);
	list_del(&sp->link);
	free_page((unsigned long)sp->spt);
	if (!sp->role.direct)
		free_page((unsigned long)sp->gfns);
	kmem_cache_free(mmu_page_header_cache, sp);
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	if (!parent_pte)
		return;

	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	__pte_list_remove(parent_pte, &sp->parent_ptes);
}

static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
	mmu_spte_clear_no_track(parent_pte);
}

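/*
 * Allocate a new shadow page from the per-vCPU caches (plus a gfns array for
 * indirect pages) and add it to the head of the active list.
 */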
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	if (!direct)
		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	/*
	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
	 * depends on valid pages being added to the head of the list.  See
	 * comments in kvm_zap_obsolete_pages().
	 */
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
}

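/*
 * Propagate the "unsync children" marker up every parent-SPTE chain of @sp,
 * so that a later sync knows which subtrees must be visited.
 */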
static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
		mark_unsync(sptep);
	}
}

static void mark_unsync(u64 *spte)
{
	struct kvm_mmu_page *sp;
	unsigned int index;

	sp = sptep_to_sp(spte);
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
		return;
	if (sp->unsync_children++)
		return;
	kvm_mmu_mark_parents_unsync(sp);
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return -1;
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i=0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
	--sp->unsync_children;
	WARN_ON((int)sp->unsync_children < 0);
	__clear_bit(idx, sp->unsync_child_bitmap);
}

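/*
 * Walk the unsync_child_bitmap below @sp and collect the unsync leaf pages
 * into @pvec; returns the number of unsync leaves found, or -ENOSPC if the
 * page vector is full.
 */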
1824 1825 1826 1827
static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;
1828

1829
	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
1830
		struct kvm_mmu_page *child;
1831 1832
		u64 ent = sp->spt[i];

1833 1834 1835 1836
		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
			clear_unsync_child_bit(sp, i);
			continue;
		}
1837

1838
		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
1839 1840 1841 1842 1843 1844

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret) {
				clear_unsync_child_bit(sp, i);
				continue;
			} else if (ret > 0) {
				nr_unsync_leaf += ret;
			} else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			clear_unsync_child_bit(sp, i);
	}

	return nr_unsync_leaf;
}

#define INVALID_INDEX (-1)

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	pvec->nr = 0;
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, INVALID_INDEX);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

#define for_each_valid_sp(_kvm, _sp, _list)				\
	hlist_for_each_entry(_sp, _list, hash_link)			\
		if (is_obsolete_sp((_kvm), (_sp))) {			\
		} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_valid_sp(_kvm, _sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

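/*
 * Sync the unsync shadow page @sp against the guest page table.  Returns
 * true if SPTEs were updated and the caller needs to flush remote TLBs;
 * if the page can no longer be synced it is zapped and false is returned.
 */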
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{
	int ret = vcpu->arch.mmu->sync_page(vcpu, sp);

	if (ret < 0) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return false;
	}

	return !!ret;
}

static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
					struct list_head *invalid_list,
					bool remote_flush)
{
	if (!remote_flush && list_empty(invalid_list))
		return false;

	if (!list_empty(invalid_list))
		kvm_mmu_commit_zap_page(kvm, invalid_list);
	else
		kvm_flush_remote_tlbs(kvm);
	return true;
}

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

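/*
 * A shadow page is obsolete if it is invalid or predates the last bump of
 * mmu_valid_gen (i.e. a fast zap-all); obsolete pages must not be reused.
 */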
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	return sp->role.invalid ||
	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
	unsigned int idx[PT64_ROOT_MAX_LEVEL];
};

#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_first(&pvec, &parents);	\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;
		unsigned idx = pvec->page[n].idx;
		int level = sp->role.level;

		parents->idx[level-1] = idx;
		if (level == PG_LEVEL_4K)
			break;

		parents->parent[level-2] = sp;
	}

	return n;
}

static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	int level;

	if (pvec->nr == 0)
		return 0;

	WARN_ON(pvec->page[0].idx != INVALID_INDEX);

	sp = pvec->page[0].sp;
	level = sp->role.level;
	WARN_ON(level == PG_LEVEL_4K);

	parents->parent[level-2] = sp;

	/*
	 * Also set up a sentinel.  Further entries in pvec are all
	 * children of sp, so this element is never overwritten.
	 */
	parents->parent[level-1] = NULL;
	return mmu_pages_next(pvec, parents, 0);
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];
		sp = parents->parent[level];
		if (!sp)
			return;

		WARN_ON(idx == INVALID_INDEX);
		clear_unsync_child_bit(sp, idx);
		level++;
	} while (!sp->unsync_children);
}

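/*
 * Sync all unsync descendants of @parent.  Children are gathered in
 * batches via mmu_unsync_walk(), their gfns are write-protected, and each
 * page is synced; mmu_lock may be yielded between batches, or -EINTR is
 * returned (with KVM_REQ_MMU_SYNC pending) when yielding isn't allowed.
 */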
static int mmu_sync_children(struct kvm_vcpu *vcpu,
			     struct kvm_mmu_page *parent, bool can_yield)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);
	bool flush = false;

	while (mmu_unsync_walk(parent, &pages)) {
		bool protected = false;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu, sp->gfn);

		if (protected) {
			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, true);
			flush = false;
		}

		for_each_sp(pages, sp, parents, i) {
			kvm_unlink_unsync_page(vcpu->kvm, sp);
			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		}
		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
			kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
			if (!can_yield) {
				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
				return -EINTR;
			}

			cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
			flush = false;
		}
	}

	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
	return 0;
}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	atomic_set(&sp->write_flooding_count, 0);
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	__clear_sp_write_flooding_count(sptep_to_sp(spte));
}

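/*
 * Look up the shadow page for @gfn with the required role in the page
 * hash, or allocate and account a new one on a miss.  Stale unsync pages
 * found along the way are synced or zapped as described below.
 */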
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int direct,
					     unsigned int access)
{
	bool direct_mmu = vcpu->arch.mmu->direct_map;
	union kvm_mmu_page_role role;
	struct hlist_head *sp_list;
	unsigned quadrant;
	struct kvm_mmu_page *sp;
	int collisions = 0;
	LIST_HEAD(invalid_list);

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = direct;
	if (role.direct)
		role.gpte_is_8_bytes = true;
	role.access = access;
	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}

	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
		}

		if (sp->role.word != role.word) {
			/*
			 * If the guest is creating an upper-level page, zap
			 * unsync pages for the same gfn.  While it's possible
			 * the guest is using recursive page tables, in all
			 * likelihood the guest has stopped using the unsync
			 * page and is installing a completely unrelated page.
			 * Unsync pages must not be left as is, because the new
			 * upper-level page will be write-protected.
			 */
			if (level > PG_LEVEL_4K && sp->unsync)
				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							 &invalid_list);
			continue;
		}

		if (direct_mmu)
			goto trace_get_page;

		if (sp->unsync) {
			/*
			 * The page is good, but is stale.  kvm_sync_page does
			 * get the latest guest state, but (unlike mmu_unsync_children)
			 * it doesn't write-protect the page or mark it synchronized!
			 * This way the validity of the mapping is ensured, but the
			 * overhead of write protection is not incurred until the
			 * guest invalidates the TLB mapping.  This allows multiple
			 * SPs for a single gfn to be unsync.
			 *
			 * If the sync fails, the page is zapped.  If so, break
			 * in order to rebuild it.
			 */
			if (!kvm_sync_page(vcpu, sp, &invalid_list))
				break;

			WARN_ON(!list_empty(&invalid_list));
			kvm_flush_remote_tlbs(vcpu->kvm);
		}

		__clear_sp_write_flooding_count(sp);

trace_get_page:
		trace_kvm_mmu_get_page(sp, false);
		goto out;
	}

	++vcpu->kvm->stat.mmu_cache_miss;

	sp = kvm_mmu_alloc_page(vcpu, direct);

	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, sp_list);
	if (!direct) {
		account_shadowed(vcpu->kvm, sp);
		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
	}
	trace_kvm_mmu_get_page(sp, true);
out:
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
	return sp;
}

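/*
 * Initialize an iterator for walking the shadow page table of @addr
 * starting from the given root.  For PAE roots the walk starts one level
 * down, at the pae_root entry selected by bits 31:30 of the address.
 */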
static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = root;
	iterator->level = vcpu->arch.mmu->shadow_root_level;

	if (iterator->level == PT64_ROOT_4LEVEL &&
	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
	    !vcpu->arch.mmu->direct_map)
		--iterator->level;

	if (iterator->level == PT32E_ROOT_LEVEL) {
		/*
		 * prev_root is currently only used for 64-bit hosts. So only
		 * the active root_hpa is valid here.
		 */
		BUG_ON(root != vcpu->arch.mmu->root_hpa);

		iterator->shadow_addr
			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
				    addr);
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PG_LEVEL_4K)
		return false;

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
{
	if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
		iterator->level = 0;
		return;
	}

	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	__shadow_walk_next(iterator, *iterator->sptep);
}

2227 2228 2229 2230 2231 2232 2233 2234 2235
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
{
	u64 spte;

	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);

	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));

2236
	mmu_spte_set(sptep, spte);
2237 2238 2239 2240 2241

	mmu_page_add_parent_pte(vcpu, sp, sptep);

	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);
2242 2243
}

2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
2257
		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
2258 2259 2260
		if (child->role.access == direct_access)
			return;

2261
		drop_parent_pte(child, sptep);
2262
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2263 2264 2265
	}
}

2266 2267 2268
/* Returns the number of zapped non-leaf child shadow pages. */
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			    u64 *spte, struct list_head *invalid_list)
2269 2270 2271 2272 2273 2274
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
X
Xiao Guangrong 已提交
2275
		if (is_last_spte(pte, sp->role.level)) {
2276
			drop_spte(kvm, spte);
X
Xiao Guangrong 已提交
2277
		} else {
2278
			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2279
			drop_parent_pte(child, spte);
2280 2281 2282 2283 2284 2285 2286 2287 2288 2289

			/*
			 * Recursively zap nested TDP SPs, parentless SPs are
			 * unlikely to be used again in the near future.  This
			 * avoids retaining a large number of stale nested SPs.
			 */
			if (tdp_enabled && invalid_list &&
			    child->role.guest_mode && !child->parent_ptes.val)
				return kvm_mmu_prepare_zap_page(kvm, child,
								invalid_list);
2290
		}
2291
	} else if (is_mmio_spte(pte)) {
2292
		mmu_spte_clear_no_track(spte);
2293
	}
2294
	return 0;
2295 2296
}

2297 2298 2299
static int kvm_mmu_page_unlink_children(struct kvm *kvm,
					struct kvm_mmu_page *sp,
					struct list_head *invalid_list)
2300
{
2301
	int zapped = 0;
2302 2303
	unsigned i;

2304
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2305 2306 2307
		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);

	return zapped;
2308 2309
}

2310
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2311
{
2312 2313
	u64 *sptep;
	struct rmap_iterator iter;
2314

2315
	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2316
		drop_parent_pte(sp, sptep);
2317 2318
}

2319
static int mmu_zap_unsync_children(struct kvm *kvm,
2320 2321
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
2322
{
2323 2324 2325
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
2326

2327
	if (parent->role.level == PG_LEVEL_4K)
2328
		return 0;
2329 2330 2331 2332 2333

	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
2334
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2335
			mmu_pages_clear_parents(&parents);
2336
			zapped++;
2337 2338 2339 2340
		}
	}

	return zapped;
2341 2342
}

2343 2344 2345 2346
static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
				       struct kvm_mmu_page *sp,
				       struct list_head *invalid_list,
				       int *nr_zapped)
2347
{
2348
	bool list_unstable;
A
Avi Kivity 已提交
2349

2350
	trace_kvm_mmu_prepare_zap_page(sp);
2351
	++kvm->stat.mmu_shadow_zapped;
2352
	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2353
	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
2354
	kvm_mmu_unlink_parents(kvm, sp);
2355

2356 2357 2358
	/* Zapping children means active_mmu_pages has become unstable. */
	list_unstable = *nr_zapped;

2359
	if (!sp->role.invalid && !sp->role.direct)
2360
		unaccount_shadowed(kvm, sp);
2361

2362 2363
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
2364
	if (!sp->root_count) {
2365
		/* Count self */
2366
		(*nr_zapped)++;
2367 2368 2369 2370 2371 2372 2373 2374 2375 2376

		/*
		 * Already invalid pages (previously active roots) are not on
		 * the active page list.  See list_del() in the "else" case of
		 * !sp->root_count.
		 */
		if (sp->role.invalid)
			list_add(&sp->link, invalid_list);
		else
			list_move(&sp->link, invalid_list);
2377
		kvm_mod_used_mmu_pages(kvm, -1);
2378
	} else {
2379 2380 2381 2382 2383
		/*
		 * Remove the active root from the active page list, the root
		 * will be explicitly freed when the root_count hits zero.
		 */
		list_del(&sp->link);
2384

2385 2386 2387 2388 2389 2390
		/*
		 * Obsolete pages cannot be used on any vCPUs, see the comment
		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
		 * treats invalid shadow pages as being obsolete.
		 */
		if (!is_obsolete_sp(kvm, sp))
2391
			kvm_reload_remote_mmus(kvm);
2392
	}
2393

P
Paolo Bonzini 已提交
2394 2395 2396
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

2397
	sp->role.invalid = 1;
2398 2399 2400 2401 2402 2403 2404 2405 2406 2407
	return list_unstable;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list)
{
	int nr_zapped;

	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
	return nr_zapped;
2408 2409
}

2410 2411 2412
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
2413
	struct kvm_mmu_page *sp, *nsp;
2414 2415 2416 2417

	if (list_empty(invalid_list))
		return;

2418
	/*
2419 2420 2421 2422 2423 2424 2425
	 * We need to make sure everyone sees our modifications to
	 * the page tables and see changes to vcpu->mode here. The barrier
	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
	 *
	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
	 * guest mode and/or lockless shadow page table walks.
2426 2427
	 */
	kvm_flush_remote_tlbs(kvm);
2428

2429
	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2430
		WARN_ON(!sp->role.invalid || sp->root_count);
2431
		kvm_mmu_free_page(sp);
2432
	}
2433 2434
}

2435 2436
static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
						  unsigned long nr_to_zap)
2437
{
2438 2439
	unsigned long total_zapped = 0;
	struct kvm_mmu_page *sp, *tmp;
2440
	LIST_HEAD(invalid_list);
2441 2442
	bool unstable;
	int nr_zapped;
2443 2444

	if (list_empty(&kvm->arch.active_mmu_pages))
2445 2446
		return 0;

2447
restart:
2448
	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459
		/*
		 * Don't zap active root pages, the page itself can't be freed
		 * and zapping it will just force vCPUs to realloc and reload.
		 */
		if (sp->root_count)
			continue;

		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
						      &nr_zapped);
		total_zapped += nr_zapped;
		if (total_zapped >= nr_to_zap)
2460 2461
			break;

2462 2463
		if (unstable)
			goto restart;
2464
	}
2465

2466 2467 2468 2469 2470 2471
	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	kvm->stat.mmu_recycled += total_zapped;
	return total_zapped;
}

2472 2473 2474 2475 2476 2477 2478
static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
2479 2480
}

2481 2482
static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
2483
	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);
2484

2485
	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
2486 2487
		return 0;

2488
	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
2489

2490 2491 2492 2493 2494
	/*
	 * Note, this check is intentionally soft, it only guarantees that one
	 * page is available, while the caller may end up allocating as many as
	 * four pages, e.g. for PAE roots or for 5-level paging.  Temporarily
	 * exceeding the (arbitrary by default) limit will not harm the host,
I
Ingo Molnar 已提交
2495
	 * being too aggressive may unnecessarily kill the guest, and getting an
2496 2497 2498
	 * exact count is far more trouble than it's worth, especially in the
	 * page fault paths.
	 */
2499 2500 2501 2502 2503
	if (!kvm_mmu_available_pages(vcpu->kvm))
		return -ENOSPC;
	return 0;
}

2504 2505
/*
 * Changing the number of mmu pages allocated to the vm
2506
 * Note: if goal_nr_mmu_pages is too small, you will get dead lock
2507
 */
2508
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2509
{
2510
	write_lock(&kvm->mmu_lock);
2511

2512
	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2513 2514
		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
						  goal_nr_mmu_pages);
2515

2516
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2517 2518
	}

2519
	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2520

2521
	write_unlock(&kvm->mmu_lock);
2522 2523
}

2524
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2525
{
2526
	struct kvm_mmu_page *sp;
2527
	LIST_HEAD(invalid_list);
2528 2529
	int r;

2530
	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2531
	r = 0;
2532
	write_lock(&kvm->mmu_lock);
2533
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2534
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2535 2536
			 sp->role.word);
		r = 1;
2537
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2538
	}
2539
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2540
	write_unlock(&kvm->mmu_lock);
2541

2542
	return r;
2543
}
2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558

static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (vcpu->arch.mmu->direct_map)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return r;
}
2559

2560
static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2561 2562 2563 2564 2565 2566 2567 2568
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

2569 2570 2571 2572 2573 2574
/*
 * Attempt to unsync any shadow pages that can be reached by the specified gfn,
 * KVM is creating a writable mapping for said gfn.  Returns 0 if all pages
 * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
 * be write-protected.
 */
2575 2576
int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
			    bool speculative)
2577
{
2578
	struct kvm_mmu_page *sp;
2579
	bool locked = false;
2580

2581 2582 2583 2584 2585
	/*
	 * Force write-protection if the page is being tracked.  Note, the page
	 * track machinery is used to write-protect upper-level shadow pages,
	 * i.e. this guards the role.level == 4K assertion below!
	 */
2586
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
2587
		return -EPERM;
2588

2589 2590 2591 2592 2593 2594
	/*
	 * The page is not write-tracked, mark existing shadow pages unsync
	 * unless KVM is synchronizing an unsync SP (can_unsync = false).  In
	 * that case, KVM must complete emulation of the guest TLB flush before
	 * allowing shadow pages to become unsync (writable by the guest).
	 */
2595
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2596
		if (!can_unsync)
2597
			return -EPERM;
2598

2599 2600
		if (sp->unsync)
			continue;
2601

2602 2603 2604
		if (speculative)
			return -EEXIST;

2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627
		/*
		 * TDP MMU page faults require an additional spinlock as they
		 * run with mmu_lock held for read, not write, and the unsync
		 * logic is not thread safe.  Take the spinlock regardless of
		 * the MMU type to avoid extra conditionals/parameters, there's
		 * no meaningful penalty if mmu_lock is held for write.
		 */
		if (!locked) {
			locked = true;
			spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);

			/*
			 * Recheck after taking the spinlock, a different vCPU
			 * may have since marked the page unsync.  A false
			 * positive on the unprotected check above is not
			 * possible as clearing sp->unsync _must_ hold mmu_lock
			 * for write, i.e. unsync cannot transition from 0->1
			 * while this CPU holds mmu_lock for read (or write).
			 */
			if (READ_ONCE(sp->unsync))
				continue;
		}

2628
		WARN_ON(sp->role.level != PG_LEVEL_4K);
2629
		kvm_unsync_page(vcpu, sp);
2630
	}
2631 2632
	if (locked)
		spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
2633

2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655
	/*
	 * We need to ensure that the marking of unsync pages is visible
	 * before the SPTE is updated to allow writes because
	 * kvm_mmu_sync_roots() checks the unsync flags without holding
	 * the MMU lock and so can race with this. If the SPTE was updated
	 * before the page had been marked as unsync-ed, something like the
	 * following could happen:
	 *
	 * CPU 1                    CPU 2
	 * ---------------------------------------------------------------------
	 * 1.2 Host updates SPTE
	 *     to be writable
	 *                      2.1 Guest writes a GPTE for GVA X.
	 *                          (GPTE being in the guest page table shadowed
	 *                           by the SP from CPU 1.)
	 *                          This reads SPTE during the page table walk.
	 *                          Since SPTE.W is read as 1, there is no
	 *                          fault.
	 *
	 *                      2.2 Guest issues TLB flush.
	 *                          That causes a VM Exit.
	 *
2656 2657
	 *                      2.3 Walking of unsync pages sees sp->unsync is
	 *                          false and skips the page.
2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672
	 *
	 *                      2.4 Guest accesses GVA X.
	 *                          Since the mapping in the SP was not updated,
	 *                          so the old mapping for GVA X incorrectly
	 *                          gets used.
	 * 1.1 Host marks SP
	 *     as unsync
	 *     (sp->unsync = true)
	 *
	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
	 * pairs with this write barrier.
	 */
	smp_wmb();

2673
	return 0;
2674 2675
}

2676 2677
static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
			u64 *sptep, unsigned int pte_access, gfn_t gfn,
2678
			kvm_pfn_t pfn, struct kvm_page_fault *fault)
M
Marcelo Tosatti 已提交
2679
{
2680
	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
2681
	int level = sp->role.level;
M
Marcelo Tosatti 已提交
2682
	int was_rmapped = 0;
2683
	int ret = RET_PF_FIXED;
2684
	bool flush = false;
2685
	bool wrprot;
2686
	u64 spte;
M
Marcelo Tosatti 已提交
2687

2688 2689 2690 2691 2692
	/* Prefetching always gets a writable pfn.  */
	bool host_writable = !fault || fault->map_writable;
	bool speculative = !fault || fault->prefault;
	bool write_fault = fault && fault->write;

2693 2694
	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);
M
Marcelo Tosatti 已提交
2695

2696 2697 2698 2699 2700
	if (unlikely(is_noslot_pfn(pfn))) {
		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
		return RET_PF_EMULATE;
	}

2701
	if (is_shadow_present_pte(*sptep)) {
M
Marcelo Tosatti 已提交
2702 2703 2704 2705
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
2706
		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
M
Marcelo Tosatti 已提交
2707
			struct kvm_mmu_page *child;
A
Avi Kivity 已提交
2708
			u64 pte = *sptep;
M
Marcelo Tosatti 已提交
2709

2710
			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
2711
			drop_parent_pte(child, sptep);
2712
			flush = true;
A
Avi Kivity 已提交
2713
		} else if (pfn != spte_to_pfn(*sptep)) {
2714
			pgprintk("hfn old %llx new %llx\n",
A
Avi Kivity 已提交
2715
				 spte_to_pfn(*sptep), pfn);
2716
			drop_spte(vcpu->kvm, sptep);
2717
			flush = true;
2718 2719
		} else
			was_rmapped = 1;
M
Marcelo Tosatti 已提交
2720
	}
2721

2722 2723
	wrprot = make_spte(vcpu, sp, pte_access, gfn, pfn, *sptep, speculative,
			   true, host_writable, &spte);
2724 2725 2726 2727 2728 2729 2730 2731

	if (*sptep == spte) {
		ret = RET_PF_SPURIOUS;
	} else {
		trace_kvm_mmu_set_spte(level, gfn, sptep);
		flush |= mmu_spte_update(sptep, spte);
	}

2732
	if (wrprot) {
M
Marcelo Tosatti 已提交
2733
		if (write_fault)
2734
			ret = RET_PF_EMULATE;
2735
	}
2736

2737
	if (flush)
2738 2739
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
				KVM_PAGES_PER_HPAGE(level));
M
Marcelo Tosatti 已提交
2740

A
Avi Kivity 已提交
2741
	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
M
Marcelo Tosatti 已提交
2742

2743
	if (!was_rmapped) {
2744
		WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
2745
		kvm_update_page_stats(vcpu->kvm, level, 1);
2746
		rmap_add(vcpu, slot, sptep, gfn);
2747
	}
2748

2749
	return ret;
2750 2751
}

2752 2753 2754 2755 2756
static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
2757
	struct kvm_memory_slot *slot;
2758
	unsigned int access = sp->role.access;
2759 2760 2761 2762
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2763 2764
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
	if (!slot)
2765 2766
		return -1;

2767
	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2768 2769 2770
	if (ret <= 0)
		return -1;

2771
	for (i = 0; i < ret; i++, gfn++, start++) {
2772
		mmu_set_spte(vcpu, slot, start, access, gfn,
2773
			     page_to_pfn(pages[i]), NULL);
2774 2775
		put_page(pages[i]);
	}
2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791

	return 0;
}

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2792
		if (is_shadow_present_pte(*spte) || spte == sptep) {
2793 2794 2795
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2796
				return;
2797 2798 2799 2800
			start = NULL;
		} else if (!start)
			start = spte;
	}
2801 2802
	if (start)
		direct_pte_prefetch_many(vcpu, sp, start, spte);
2803 2804 2805 2806 2807 2808
}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

2809
	sp = sptep_to_sp(sptep);
2810

2811
	/*
2812 2813 2814
	 * Without accessed bits, there's no way to distinguish between
	 * actually accessed translations and prefetched, so disable pte
	 * prefetch if accessed bits aren't available.
2815
	 */
2816
	if (sp_ad_disabled(sp))
2817 2818
		return;

2819
	if (sp->role.level > PG_LEVEL_4K)
2820 2821
		return;

2822 2823 2824 2825 2826 2827 2828
	/*
	 * If addresses are being invalidated, skip prefetching to avoid
	 * accidentally prefetching those addresses.
	 */
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return;

2829 2830 2831
	__direct_pte_prefetch(vcpu, sp, sptep);
}

2832
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
2833
				  const struct kvm_memory_slot *slot)
2834 2835 2836 2837 2838
{
	unsigned long hva;
	pte_t *pte;
	int level;

2839
	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
2840
		return PG_LEVEL_4K;
2841

2842 2843 2844 2845 2846 2847 2848 2849
	/*
	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
	 * is not solely for performance, it's also necessary to avoid the
	 * "writable" check in __gfn_to_hva_many(), which will always fail on
	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
	 * page fault steps have already verified the guest isn't writing a
	 * read-only memslot.
	 */
2850 2851
	hva = __gfn_to_hva_memslot(slot, gfn);

2852
	pte = lookup_address_in_mm(kvm->mm, hva, &level);
2853
	if (unlikely(!pte))
2854
		return PG_LEVEL_4K;
2855 2856 2857 2858

	return level;
}

2859 2860 2861
int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      kvm_pfn_t pfn, int max_level)
2862 2863
{
	struct kvm_lpage_info *linfo;
2864
	int host_level;
2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875

	max_level = min(max_level, max_huge_page_level);
	for ( ; max_level > PG_LEVEL_4K; max_level--) {
		linfo = lpage_info_slot(gfn, slot, max_level);
		if (!linfo->disallow_lpage)
			break;
	}

	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

2876 2877
	host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
	return min(host_level, max_level);
2878 2879
}

2880
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
2881
{
2882
	struct kvm_memory_slot *slot = fault->slot;
2883 2884
	kvm_pfn_t mask;

2885
	fault->huge_page_disallowed = fault->exec && fault->nx_huge_page_workaround_enabled;
2886

2887 2888
	if (unlikely(fault->max_level == PG_LEVEL_4K))
		return;
2889

2890 2891
	if (is_error_noslot_pfn(fault->pfn) || kvm_is_reserved_pfn(fault->pfn))
		return;
2892

2893
	if (kvm_slot_dirty_track_enabled(slot))
2894
		return;
2895

2896 2897 2898 2899
	/*
	 * Enforce the iTLB multihit workaround after capturing the requested
	 * level, which will be used to do precise, accurate accounting.
	 */
2900 2901 2902 2903 2904
	fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
						     fault->gfn, fault->pfn,
						     fault->max_level);
	if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
		return;
2905 2906

	/*
2907 2908
	 * mmu_notifier_retry() was successful and mmu_lock is held, so
	 * the pmd can't be split from under us.
2909
	 */
2910 2911 2912 2913
	fault->goal_level = fault->req_level;
	mask = KVM_PAGES_PER_HPAGE(fault->goal_level) - 1;
	VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
	fault->pfn &= ~mask;
2914 2915
}

2916
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
P
Paolo Bonzini 已提交
2917
{
2918 2919
	if (cur_level > PG_LEVEL_4K &&
	    cur_level == fault->goal_level &&
P
Paolo Bonzini 已提交
2920 2921 2922 2923 2924 2925 2926 2927 2928
	    is_shadow_present_pte(spte) &&
	    !is_large_pte(spte)) {
		/*
		 * A small SPTE exists for this pfn, but FNAME(fetch)
		 * and __direct_map would like to create a large PTE
		 * instead: just force them to go down another level,
		 * patching back for them into pfn the next 9 bits of
		 * the address.
		 */
2929 2930 2931 2932
		u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
				KVM_PAGES_PER_HPAGE(cur_level - 1);
		fault->pfn |= fault->gfn & page_mask;
		fault->goal_level--;
P
Paolo Bonzini 已提交
2933 2934 2935
	}
}

2936
static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
2937
{
2938
	struct kvm_shadow_walk_iterator it;
2939
	struct kvm_mmu_page *sp;
2940
	int ret;
2941
	gfn_t base_gfn = fault->gfn;
A
Avi Kivity 已提交
2942

2943
	kvm_mmu_hugepage_adjust(vcpu, fault);
2944

2945
	trace_kvm_mmu_spte_requested(fault);
2946
	for_each_shadow_entry(vcpu, fault->addr, it) {
P
Paolo Bonzini 已提交
2947 2948 2949 2950
		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
2951
		if (fault->nx_huge_page_workaround_enabled)
2952
			disallowed_hugepage_adjust(fault, *it.sptep, it.level);
P
Paolo Bonzini 已提交
2953

2954
		base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
2955
		if (it.level == fault->goal_level)
2956
			break;
A
Avi Kivity 已提交
2957

2958
		drop_large_spte(vcpu, it.sptep);
2959 2960 2961 2962 2963 2964 2965
		if (is_shadow_present_pte(*it.sptep))
			continue;

		sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
				      it.level - 1, true, ACC_ALL);

		link_shadow_page(vcpu, it.sptep, sp);
2966 2967
		if (fault->is_tdp && fault->huge_page_disallowed &&
		    fault->req_level >= it.level)
2968
			account_huge_nx_page(vcpu->kvm, sp);
2969
	}
2970

2971 2972 2973
	if (WARN_ON_ONCE(it.level != fault->goal_level))
		return -EFAULT;

2974
	ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
2975
			   base_gfn, fault->pfn, fault);
2976 2977 2978
	if (ret == RET_PF_SPURIOUS)
		return ret;

2979 2980 2981
	direct_pte_prefetch(vcpu, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;
A
Avi Kivity 已提交
2982 2983
}

H
Huang Ying 已提交
2984
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
2985
{
2986
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
2987 2988
}

D
Dan Williams 已提交
2989
static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2990
{
X
Xiao Guangrong 已提交
2991 2992 2993 2994 2995 2996
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte otherwise read access on readonly gfn also can
	 * caused mmio page fault and treat it as mmio access.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
2997
		return RET_PF_EMULATE;
X
Xiao Guangrong 已提交
2998

2999
	if (pfn == KVM_PFN_ERR_HWPOISON) {
3000
		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
3001
		return RET_PF_RETRY;
3002
	}
3003

3004
	return -EFAULT;
3005 3006
}

3007 3008
static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
				unsigned int access, int *ret_val)
3009 3010
{
	/* The pfn is invalid, report the error! */
3011 3012
	if (unlikely(is_error_pfn(fault->pfn))) {
		*ret_val = kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
3013
		return true;
3014 3015
	}

3016
	if (unlikely(!fault->slot)) {
3017 3018 3019
		gva_t gva = fault->is_tdp ? 0 : fault->addr;

		vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3020
				     access & shadow_mmio_access_mask);
3021 3022 3023 3024 3025 3026 3027 3028 3029 3030
		/*
		 * If MMIO caching is disabled, emulate immediately without
		 * touching the shadow page tables as attempting to install an
		 * MMIO SPTE will just be an expensive nop.
		 */
		if (unlikely(!shadow_mmio_value)) {
			*ret_val = RET_PF_EMULATE;
			return true;
		}
	}
3031

3032
	return false;
3033 3034
}

3035
static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
3036
{
3037 3038 3039 3040
	/*
	 * Do not fix the mmio spte with invalid generation number which
	 * need to be updated by slow page fault path.
	 */
3041
	if (fault->rsvd)
3042 3043
		return false;

3044
	/* See if the page fault is due to an NX violation */
3045
	if (unlikely(fault->exec && fault->present))
3046 3047
		return false;

3048
	/*
3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059
	 * #PF can be fast if:
	 * 1. The shadow page table entry is not present, which could mean that
	 *    the fault is potentially caused by access tracking (if enabled).
	 * 2. The shadow page table entry is present and the fault
	 *    is caused by write-protect, that means we just need change the W
	 *    bit of the spte which can be done out of mmu-lock.
	 *
	 * However, if access tracking is disabled we know that a non-present
	 * page must be a genuine page fault where we have to create a new SPTE.
	 * So, if access tracking is disabled, we return true only for write
	 * accesses to a present page.
3060 3061
	 */

3062
	return shadow_acc_track_mask != 0 || (fault->write && fault->present);
3063 3064
}

3065 3066 3067 3068
/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
3069
static bool
3070
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
3071
			u64 *sptep, u64 old_spte, u64 new_spte)
3072
{
3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084
	/*
	 * Theoretically we could also set dirty bit (and flush TLB) here in
	 * order to eliminate unnecessary PML logging. See comments in
	 * set_spte. But fast_page_fault is very unlikely to happen with PML
	 * enabled, so we do not do this. This might result in the same GPA
	 * to be logged in PML buffer again when the write really happens, and
	 * eventually to be called by mark_page_dirty twice. But it's also no
	 * harm. This also avoids the TLB flush needed after setting dirty bit
	 * so non-PML cases won't be impacted.
	 *
	 * Compare with set_spte where instead shadow_dirty_mask is set.
	 */
3085
	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3086 3087
		return false;

3088 3089
	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte))
		mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3090 3091 3092 3093

	return true;
}

3094
static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
3095
{
3096
	if (fault->exec)
3097 3098
		return is_executable_pte(spte);

3099
	if (fault->write)
3100 3101 3102 3103 3104 3105
		return is_writable_pte(spte);

	/* Fault was on Read access */
	return spte & PT_PRESENT_MASK;
}

3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128
/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-preset. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between walk_shadow_page_lockless_{begin,end}.
 *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
 */
static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 old_spte;
	u64 *sptep = NULL;

	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
		sptep = iterator.sptep;
		*spte = old_spte;
	}

	return sptep;
}

3129
/*
3130
 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
3131
 */
3132
static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3133
{
3134
	struct kvm_mmu_page *sp;
3135
	int ret = RET_PF_INVALID;
3136
	u64 spte = 0ull;
3137
	u64 *sptep = NULL;
3138
	uint retry_count = 0;
3139

3140
	if (!page_fault_can_be_fast(fault))
3141
		return ret;
3142 3143 3144

	walk_shadow_page_lockless_begin(vcpu);

3145
	do {
3146
		u64 new_spte;
3147

3148
		if (is_tdp_mmu(vcpu->arch.mmu))
3149
			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3150
		else
3151
			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3152

3153 3154 3155
		if (!is_shadow_present_pte(spte))
			break;

3156
		sp = sptep_to_sp(sptep);
3157 3158
		if (!is_last_spte(spte, sp->role.level))
			break;
3159

3160
		/*
3161 3162 3163 3164 3165
		 * Check whether the memory access that caused the fault would
		 * still cause it if it were to be performed right now. If not,
		 * then this is a spurious fault caused by TLB lazily flushed,
		 * or some other CPU has already fixed the PTE after the
		 * current CPU took the fault.
3166 3167 3168 3169
		 *
		 * Need not check the access of upper level table entries since
		 * they are always ACC_ALL.
		 */
3170
		if (is_access_allowed(fault, spte)) {
3171
			ret = RET_PF_SPURIOUS;
3172 3173
			break;
		}
3174

3175 3176 3177 3178 3179 3180 3181 3182 3183 3184
		new_spte = spte;

		if (is_access_track_spte(spte))
			new_spte = restore_acc_track_spte(new_spte);

		/*
		 * Currently, to simplify the code, write-protection can
		 * be removed in the fast path only if the SPTE was
		 * write-protected for dirty-logging or access tracking.
		 */
3185
		if (fault->write &&
3186
		    spte_can_locklessly_be_made_writable(spte)) {
3187
			new_spte |= PT_WRITABLE_MASK;
3188 3189

			/*
3190 3191 3192 3193 3194 3195 3196 3197 3198
			 * Do not fix write-permission on the large spte.  Since
			 * we only dirty the first page into the dirty-bitmap in
			 * fast_pf_fix_direct_spte(), other pages are missed
			 * if its slot has dirty logging enabled.
			 *
			 * Instead, we let the slow page fault path create a
			 * normal spte to fix the access.
			 *
			 * See the comments in kvm_arch_commit_memory_region().
3199
			 */
3200
			if (sp->role.level > PG_LEVEL_4K)
3201
				break;
3202
		}
3203

3204
		/* Verify that the fault can be handled in the fast path */
3205
		if (new_spte == spte ||
3206
		    !is_access_allowed(fault, new_spte))
3207 3208 3209 3210 3211
			break;

		/*
		 * Currently, fast page fault only works for direct mapping
		 * since the gfn is not stable for indirect shadow page. See
3212
		 * Documentation/virt/kvm/locking.rst to get more detail.
3213
		 */
3214
		if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3215
			ret = RET_PF_FIXED;
3216
			break;
3217
		}
3218 3219 3220 3221 3222 3223 3224 3225

		if (++retry_count > 4) {
			printk_once(KERN_WARNING
				"kvm: Fast #PF retrying more than 4 times.\n");
			break;
		}

	} while (true);
3226

3227
	trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
3228 3229
	walk_shadow_page_lockless_end(vcpu);

3230
	return ret;
3231 3232
}

3233 3234
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
3235
{
3236
	struct kvm_mmu_page *sp;
3237

3238
	if (!VALID_PAGE(*root_hpa))
A
Avi Kivity 已提交
3239
		return;
3240

3241
	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
3242

3243
	if (is_tdp_mmu_page(sp))
3244
		kvm_tdp_mmu_put_root(kvm, sp, false);
3245 3246
	else if (!--sp->root_count && sp->role.invalid)
		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
3247

3248 3249 3250
	*root_hpa = INVALID_PAGE;
}

3251
/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
3252 3253
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free)
3254
{
3255
	struct kvm *kvm = vcpu->kvm;
3256 3257
	int i;
	LIST_HEAD(invalid_list);
3258
	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
3259

3260
	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
3261

3262
	/* Before acquiring the MMU lock, see if we need to do any real work. */
3263 3264 3265 3266 3267 3268 3269 3270 3271
	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
			    VALID_PAGE(mmu->prev_roots[i].hpa))
				break;

		if (i == KVM_MMU_NUM_PREV_ROOTS)
			return;
	}
3272

3273
	write_lock(&kvm->mmu_lock);
3274

3275 3276
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
3277
			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
3278
					   &invalid_list);
3279

3280 3281 3282
	if (free_active_root) {
		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
3283
			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
3284
		} else if (mmu->pae_root) {
3285 3286 3287 3288 3289 3290 3291 3292
			for (i = 0; i < 4; ++i) {
				if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
					continue;

				mmu_free_root_page(kvm, &mmu->pae_root[i],
						   &invalid_list);
				mmu->pae_root[i] = INVALID_PAE_ROOT;
			}
3293
		}
3294
		mmu->root_hpa = INVALID_PAGE;
3295
		mmu->root_pgd = 0;
3296
	}
3297

3298
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
3299
	write_unlock(&kvm->mmu_lock);
3300
}
3301
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
3302

3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329
void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	unsigned long roots_to_free = 0;
	hpa_t root_hpa;
	int i;

	/*
	 * This should not be called while L2 is active, L2 can't invalidate
	 * _only_ its own roots, e.g. INVVPID unconditionally exits.
	 */
	WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		root_hpa = mmu->prev_roots[i].hpa;
		if (!VALID_PAGE(root_hpa))
			continue;

		if (!to_shadow_page(root_hpa) ||
			to_shadow_page(root_hpa)->role.guest_mode)
			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
	}

	kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);


3330 3331 3332 3333
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

3334
	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
3335
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3336 3337 3338 3339 3340 3341
		ret = 1;
	}

	return ret;
}

3342 3343
static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
			    u8 level, bool direct)
3344 3345
{
	struct kvm_mmu_page *sp;
3346 3347 3348 3349 3350 3351 3352 3353 3354

	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
	++sp->root_count;

	return __pa(sp->spt);
}

static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
3355 3356
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	u8 shadow_root_level = mmu->shadow_root_level;
3357
	hpa_t root;
3358
	unsigned i;
3359 3360 3361 3362 3363 3364
	int r;

	write_lock(&vcpu->kvm->mmu_lock);
	r = make_mmu_pages_available(vcpu);
	if (r < 0)
		goto out_unlock;
3365

3366
	if (is_tdp_mmu_enabled(vcpu->kvm)) {
3367
		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3368
		mmu->root_hpa = root;
3369
	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
3370
		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
3371
		mmu->root_hpa = root;
3372
	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
3373 3374 3375 3376
		if (WARN_ON_ONCE(!mmu->pae_root)) {
			r = -EIO;
			goto out_unlock;
		}
3377

3378
		for (i = 0; i < 4; ++i) {
3379
			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3380

3381 3382
			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
					      i << 30, PT32_ROOT_LEVEL, true);
3383 3384
			mmu->pae_root[i] = root | PT_PRESENT_MASK |
					   shadow_me_mask;
3385
		}
3386
		mmu->root_hpa = __pa(mmu->pae_root);
3387 3388
	} else {
		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3389 3390
		r = -EIO;
		goto out_unlock;
3391
	}
3392

3393
	/* root_pgd is ignored for direct MMUs. */
3394
	mmu->root_pgd = 0;
3395 3396 3397
out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
	return r;
3398 3399 3400
}

static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3401
{
3402
	struct kvm_mmu *mmu = vcpu->arch.mmu;
3403
	u64 pdptrs[4], pm_mask;
3404
	gfn_t root_gfn, root_pgd;
3405
	hpa_t root;
3406 3407
	unsigned i;
	int r;
3408

3409
	root_pgd = mmu->get_guest_pgd(vcpu);
3410
	root_gfn = root_pgd >> PAGE_SHIFT;
3411

3412 3413 3414
	if (mmu_check_root(vcpu, root_gfn))
		return 1;

3415 3416 3417 3418
	/*
	 * On SVM, reading PDPTRs might access guest memory, which might fault
	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
	 */
3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429
	if (mmu->root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			pdptrs[i] = mmu->get_pdptr(vcpu, i);
			if (!(pdptrs[i] & PT_PRESENT_MASK))
				continue;

			if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
				return 1;
		}
	}

3430 3431 3432 3433
	r = alloc_all_memslots_rmaps(vcpu->kvm);
	if (r)
		return r;

3434 3435 3436 3437 3438
	write_lock(&vcpu->kvm->mmu_lock);
	r = make_mmu_pages_available(vcpu);
	if (r < 0)
		goto out_unlock;

3439 3440 3441 3442
	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
3443
	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
3444
		root = mmu_alloc_root(vcpu, root_gfn, 0,
3445 3446
				      mmu->shadow_root_level, false);
		mmu->root_hpa = root;
3447
		goto set_root_pgd;
3448
	}
3449

3450 3451 3452 3453
	if (WARN_ON_ONCE(!mmu->pae_root)) {
		r = -EIO;
		goto out_unlock;
	}
3454

3455 3456
	/*
	 * We shadow a 32 bit page table. This may be a legacy 2-level
3457 3458
	 * or a PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
3459
	 */
3460
	pm_mask = PT_PRESENT_MASK | shadow_me_mask;
3461
	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
3462 3463
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

3464
		if (WARN_ON_ONCE(!mmu->pml4_root)) {
3465 3466 3467
			r = -EIO;
			goto out_unlock;
		}
3468
		mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
3469 3470 3471 3472 3473 3474 3475 3476

		if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
			if (WARN_ON_ONCE(!mmu->pml5_root)) {
				r = -EIO;
				goto out_unlock;
			}
			mmu->pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
		}
3477 3478
	}

3479
	for (i = 0; i < 4; ++i) {
3480
		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
3481

3482
		if (mmu->root_level == PT32E_ROOT_LEVEL) {
3483
			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
3484
				mmu->pae_root[i] = INVALID_PAE_ROOT;
A
Avi Kivity 已提交
3485 3486
				continue;
			}
3487
			root_gfn = pdptrs[i] >> PAGE_SHIFT;
3488
		}
3489

3490 3491
		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, false);
3492
		mmu->pae_root[i] = root | pm_mask;
3493
	}
3494

3495 3496 3497
	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
		mmu->root_hpa = __pa(mmu->pml5_root);
	else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
3498
		mmu->root_hpa = __pa(mmu->pml4_root);
3499 3500
	else
		mmu->root_hpa = __pa(mmu->pae_root);
3501

3502
set_root_pgd:
3503
	mmu->root_pgd = root_pgd;
3504 3505
out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
3506

3507
	return 0;
3508 3509
}

3510 3511 3512
static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
3513
	bool need_pml5 = mmu->shadow_root_level > PT64_ROOT_4LEVEL;
3514 3515 3516
	u64 *pml5_root = NULL;
	u64 *pml4_root = NULL;
	u64 *pae_root;
3517 3518

	/*
3519 3520 3521 3522
	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
	 * tables are allocated and initialized at root creation as there is no
	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
3523
	 */
3524 3525 3526
	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
	    mmu->shadow_root_level < PT64_ROOT_4LEVEL)
		return 0;
3527

3528 3529 3530 3531 3532 3533 3534 3535
	/*
	 * NPT, the only paging mode that uses this horror, uses a fixed number
	 * of levels for the shadow page tables, e.g. all MMUs are 4-level or
	 * all MMus are 5-level.  Thus, this can safely require that pml5_root
	 * is allocated if the other roots are valid and pml5 is needed, as any
	 * prior MMU would also have required pml5.
	 */
	if (mmu->pae_root && mmu->pml4_root && (!need_pml5 || mmu->pml5_root))
3536
		return 0;
3537

3538 3539 3540 3541
	/*
	 * The special roots should always be allocated in concert.  Yell and
	 * bail if KVM ends up in a state where only one of the roots is valid.
	 */
3542
	if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root ||
3543
			 (need_pml5 && mmu->pml5_root)))
3544
		return -EIO;
3545

3546 3547 3548 3549
	/*
	 * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
	 * doesn't need to be decrypted.
	 */
3550 3551 3552
	pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!pae_root)
		return -ENOMEM;
3553

3554
#ifdef CONFIG_X86_64
3555
	pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3556 3557 3558
	if (!pml4_root)
		goto err_pml4;

3559
	if (need_pml5) {
3560 3561 3562
		pml5_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!pml5_root)
			goto err_pml5;
3563
	}
3564
#endif
3565

3566
	mmu->pae_root = pae_root;
3567
	mmu->pml4_root = pml4_root;
3568
	mmu->pml5_root = pml5_root;
3569

3570
	return 0;
3571 3572 3573 3574 3575 3576 3577 3578

#ifdef CONFIG_X86_64
err_pml5:
	free_page((unsigned long)pml4_root);
err_pml4:
	free_page((unsigned long)pae_root);
	return -ENOMEM;
#endif
3579 3580
}

3581
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3582 3583 3584 3585
{
	int i;
	struct kvm_mmu_page *sp;

3586
	if (vcpu->arch.mmu->direct_map)
3587 3588
		return;

3589
	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3590
		return;
3591

3592
	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3593

3594 3595
	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		hpa_t root = vcpu->arch.mmu->root_hpa;
3596
		sp = to_shadow_page(root);
3597 3598 3599 3600 3601 3602 3603 3604

		/*
		 * Even if another CPU was marking the SP as unsync-ed
		 * simultaneously, any guest page table changes are not
		 * guaranteed to be visible anyway until this VCPU issues a TLB
		 * flush strictly after those changes are made. We only need to
		 * ensure that the other CPU sets these flags before any actual
		 * changes to the page tables are made. The comments in
3605 3606
		 * mmu_try_to_unsync_pages() describe what could go wrong if
		 * this requirement isn't satisfied.
3607 3608 3609 3610 3611
		 */
		if (!smp_load_acquire(&sp->unsync) &&
		    !smp_load_acquire(&sp->unsync_children))
			return;

3612
		write_lock(&vcpu->kvm->mmu_lock);
3613 3614
		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

3615
		mmu_sync_children(vcpu, sp, true);
3616

3617
		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3618
		write_unlock(&vcpu->kvm->mmu_lock);
3619 3620
		return;
	}
3621

3622
	write_lock(&vcpu->kvm->mmu_lock);
3623 3624
	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

3625
	for (i = 0; i < 4; ++i) {
3626
		hpa_t root = vcpu->arch.mmu->pae_root[i];
3627

3628
		if (IS_VALID_PAE_ROOT(root)) {
3629
			root &= PT64_BASE_ADDR_MASK;
3630
			sp = to_shadow_page(root);
3631
			mmu_sync_children(vcpu, sp, true);
3632 3633 3634
		}
	}

3635
	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3636
	write_unlock(&vcpu->kvm->mmu_lock);
3637 3638
}

3639
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
3640
				  u32 access, struct x86_exception *exception)
A
Avi Kivity 已提交
3641
{
3642 3643
	if (exception)
		exception->error_code = 0;
A
Avi Kivity 已提交
3644 3645 3646
	return vaddr;
}

3647
static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
3648 3649
					 u32 access,
					 struct x86_exception *exception)
3650
{
3651 3652
	if (exception)
		exception->error_code = 0;
3653
	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3654 3655
}

3656
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3657
{
3658 3659 3660 3661 3662 3663 3664
	/*
	 * A nested guest cannot use the MMIO cache if it is using nested
	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
	 */
	if (mmu_is_nested(vcpu))
		return false;

3665 3666 3667 3668 3669 3670
	if (direct)
		return vcpu_match_mmio_gpa(vcpu, addr);

	return vcpu_match_mmio_gva(vcpu, addr);
}

3671 3672 3673
/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
3674 3675
 *
 * Must be called between walk_shadow_page_lockless_{begin,end}.
3676
 */
3677
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
3678 3679
{
	struct kvm_shadow_walk_iterator iterator;
3680
	int leaf = -1;
3681
	u64 spte;
3682

3683 3684
	for (shadow_walk_init(&iterator, vcpu, addr),
	     *root_level = iterator.level;
3685 3686
	     shadow_walk_okay(&iterator);
	     __shadow_walk_next(&iterator, spte)) {
3687
		leaf = iterator.level;
3688 3689
		spte = mmu_spte_get_lockless(iterator.sptep);

3690
		sptes[leaf] = spte;
3691 3692 3693 3694 3695
	}

	return leaf;
}

3696
/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
3697 3698
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
3699
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
3700
	struct rsvd_bits_validate *rsvd_check;
3701
	int root, leaf, level;
3702 3703
	bool reserved = false;

3704 3705
	walk_shadow_page_lockless_begin(vcpu);

3706
	if (is_tdp_mmu(vcpu->arch.mmu))
3707
		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
3708
	else
3709
		leaf = get_walk(vcpu, addr, sptes, &root);
3710

3711 3712
	walk_shadow_page_lockless_end(vcpu);

3713 3714 3715 3716 3717
	if (unlikely(leaf < 0)) {
		*sptep = 0ull;
		return reserved;
	}

3718 3719 3720 3721 3722 3723 3724 3725 3726 3727
	*sptep = sptes[leaf];

	/*
	 * Skip reserved bits checks on the terminal leaf if it's not a valid
	 * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
	 * design, always have reserved bits set.  The purpose of the checks is
	 * to detect reserved bits on non-MMIO SPTEs. i.e. buggy SPTEs.
	 */
	if (!is_shadow_present_pte(sptes[leaf]))
		leaf++;
3728 3729 3730

	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;

3731
	for (level = root; level >= leaf; level--)
3732
		reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
3733 3734

	if (reserved) {
3735
		pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
3736
		       __func__, addr);
3737
		for (level = root; level >= leaf; level--)
3738 3739
			pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
			       sptes[level], level,
3740
			       get_rsvd_bits(rsvd_check, sptes[level], level));
3741
	}
3742

3743
	return reserved;
3744 3745
}

P
Paolo Bonzini 已提交
3746
static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3747 3748
{
	u64 spte;
3749
	bool reserved;
3750

3751
	if (mmio_info_in_cache(vcpu, addr, direct))
3752
		return RET_PF_EMULATE;
3753

3754
	reserved = get_mmio_spte(vcpu, addr, &spte);
3755
	if (WARN_ON(reserved))
3756
		return -EINVAL;
3757 3758 3759

	if (is_mmio_spte(spte)) {
		gfn_t gfn = get_mmio_spte_gfn(spte);
3760
		unsigned int access = get_mmio_spte_access(spte);
3761

3762
		if (!check_mmio_spte(vcpu, spte))
3763
			return RET_PF_INVALID;
3764

3765 3766
		if (direct)
			addr = 0;
X
Xiao Guangrong 已提交
3767 3768

		trace_handle_mmio_page_fault(addr, gfn, access);
3769
		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3770
		return RET_PF_EMULATE;
3771 3772 3773 3774 3775 3776
	}

	/*
	 * If the page table is zapped by other cpus, let CPU fault again on
	 * the address.
	 */
3777
	return RET_PF_RETRY;
3778 3779
}

3780
static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3781
					 struct kvm_page_fault *fault)
3782
{
3783
	if (unlikely(fault->rsvd))
3784 3785
		return false;

3786
	if (!fault->present || !fault->write)
3787 3788 3789 3790 3791 3792
		return false;

	/*
	 * guest is writing the page which is write tracked which can
	 * not be fixed by page fault handler.
	 */
3793
	if (kvm_slot_page_track_is_active(fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE))
3794 3795 3796 3797 3798
		return true;

	return false;
}

3799 3800 3801 3802 3803 3804
static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;

	walk_shadow_page_lockless_begin(vcpu);
3805
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
3806 3807 3808 3809
		clear_sp_write_flooding_count(iterator.sptep);
	walk_shadow_page_lockless_end(vcpu);
}

3810 3811
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				    gfn_t gfn)
3812 3813
{
	struct kvm_arch_async_pf arch;
X
Xiao Guangrong 已提交
3814

3815
	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3816
	arch.gfn = gfn;
3817
	arch.direct_map = vcpu->arch.mmu->direct_map;
3818
	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3819

3820 3821
	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3822 3823
}

3824
static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, int *r)
3825
{
3826
	struct kvm_memory_slot *slot = fault->slot;
3827 3828
	bool async;

3829 3830 3831 3832 3833 3834
	/*
	 * Retry the page fault if the gfn hit a memslot that is being deleted
	 * or moved.  This ensures any existing SPTEs for the old memslot will
	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
	 */
	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
3835
		goto out_retry;
3836

3837 3838 3839
	if (!kvm_is_visible_memslot(slot)) {
		/* Don't expose private memslots to L2. */
		if (is_guest_mode(vcpu)) {
3840
			fault->slot = NULL;
3841 3842
			fault->pfn = KVM_PFN_NOSLOT;
			fault->map_writable = false;
3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855
			return false;
		}
		/*
		 * If the APIC access page exists but is disabled, go directly
		 * to emulation without caching the MMIO access or creating a
		 * MMIO SPTE.  That way the cache doesn't need to be purged
		 * when the AVIC is re-enabled.
		 */
		if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
		    !kvm_apicv_activated(vcpu->kvm)) {
			*r = RET_PF_EMULATE;
			return true;
		}
3856 3857
	}

3858
	async = false;
3859 3860 3861
	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, &async,
					  fault->write, &fault->map_writable,
					  &fault->hva);
3862 3863 3864
	if (!async)
		return false; /* *pfn has correct page already */

3865 3866 3867 3868
	if (!fault->prefault && kvm_can_do_async_pf(vcpu)) {
		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
			trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
3869
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3870
			goto out_retry;
3871
		} else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn))
3872
			goto out_retry;
3873 3874
	}

3875 3876 3877
	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
					  fault->write, &fault->map_writable,
					  &fault->hva);
3878 3879 3880 3881

out_retry:
	*r = RET_PF_RETRY;
	return true;
3882 3883
}

3884
static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
A
Avi Kivity 已提交
3885
{
3886
	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
A
Avi Kivity 已提交
3887

3888
	unsigned long mmu_seq;
3889
	int r;
3890

3891
	fault->gfn = fault->addr >> PAGE_SHIFT;
3892 3893
	fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);

3894
	if (page_fault_handle_page_track(vcpu, fault))
3895
		return RET_PF_EMULATE;
3896

3897
	r = fast_page_fault(vcpu, fault);
3898 3899
	if (r != RET_PF_INVALID)
		return r;
3900

3901
	r = mmu_topup_memory_caches(vcpu, false);
3902 3903
	if (r)
		return r;
3904

3905 3906 3907
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

3908
	if (kvm_faultin_pfn(vcpu, fault, &r))
3909
		return r;
3910

3911
	if (handle_abnormal_pfn(vcpu, fault, ACC_ALL, &r))
3912
		return r;
A
Avi Kivity 已提交
3913

3914
	r = RET_PF_RETRY;
3915

3916
	if (is_tdp_mmu_fault)
3917 3918 3919 3920
		read_lock(&vcpu->kvm->mmu_lock);
	else
		write_lock(&vcpu->kvm->mmu_lock);

3921
	if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
3922
		goto out_unlock;
3923 3924
	r = make_mmu_pages_available(vcpu);
	if (r)
3925
		goto out_unlock;
B
Ben Gardon 已提交
3926

3927
	if (is_tdp_mmu_fault)
3928
		r = kvm_tdp_mmu_map(vcpu, fault);
B
Ben Gardon 已提交
3929
	else
3930
		r = __direct_map(vcpu, fault);
3931

3932
out_unlock:
3933
	if (is_tdp_mmu_fault)
3934 3935 3936
		read_unlock(&vcpu->kvm->mmu_lock);
	else
		write_unlock(&vcpu->kvm->mmu_lock);
3937
	kvm_release_pfn_clean(fault->pfn);
3938
	return r;
A
Avi Kivity 已提交
3939 3940
}

3941 3942
static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
				struct kvm_page_fault *fault)
3943
{
3944
	pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code);
3945 3946

	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
3947 3948
	fault->max_level = PG_LEVEL_2M;
	return direct_page_fault(vcpu, fault);
3949 3950
}

3951
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3952
				u64 fault_address, char *insn, int insn_len)
3953 3954
{
	int r = 1;
3955
	u32 flags = vcpu->arch.apf.host_apf_flags;
3956

3957 3958 3959 3960 3961 3962
#ifndef CONFIG_X86_64
	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
	if (WARN_ON_ONCE(fault_address >> 32))
		return -EFAULT;
#endif

P
Paolo Bonzini 已提交
3963
	vcpu->arch.l1tf_flush_l1d = true;
3964
	if (!flags) {
3965 3966
		trace_kvm_page_fault(fault_address, error_code);

3967
		if (kvm_event_needs_reinjection(vcpu))
3968 3969 3970
			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
				insn_len);
3971
	} else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
3972
		vcpu->arch.apf.host_apf_flags = 0;
3973
		local_irq_disable();
3974
		kvm_async_pf_task_wait_schedule(fault_address);
3975
		local_irq_enable();
3976 3977
	} else {
		WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags);
3978
	}
3979

3980 3981 3982 3983
	return r;
}
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);

3984
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
3985
{
3986 3987 3988
	while (fault->max_level > PG_LEVEL_4K) {
		int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
		gfn_t base = (fault->addr >> PAGE_SHIFT) & ~(page_num - 1);
3989

3990 3991
		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
			break;
3992 3993

		--fault->max_level;
3994
	}
3995

3996
	return direct_page_fault(vcpu, fault);
3997 3998
}

3999
static void nonpaging_init_context(struct kvm_mmu *context)
A
Avi Kivity 已提交
4000 4001 4002
{
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
4003
	context->sync_page = nonpaging_sync_page;
4004
	context->invlpg = NULL;
4005
	context->direct_map = true;
A
Avi Kivity 已提交
4006 4007
}

4008
static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4009 4010
				  union kvm_mmu_page_role role)
{
4011
	return (role.direct || pgd == root->pgd) &&
4012 4013
	       VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
	       role.word == to_shadow_page(root->hpa)->role.word;
4014 4015
}

4016
/*
4017
 * Find out if a previously cached root matching the new pgd/role is available.
4018 4019 4020 4021 4022 4023
 * The current root is also inserted into the cache.
 * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
 * returned.
 * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
 * false is returned. This root should now be freed by the caller.
 */
4024
static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4025 4026 4027 4028
				  union kvm_mmu_page_role new_role)
{
	uint i;
	struct kvm_mmu_root_info root;
4029
	struct kvm_mmu *mmu = vcpu->arch.mmu;
4030

4031
	root.pgd = mmu->root_pgd;
4032 4033
	root.hpa = mmu->root_hpa;

4034
	if (is_root_usable(&root, new_pgd, new_role))
4035 4036
		return true;

4037 4038 4039
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		swap(root, mmu->prev_roots[i]);

4040
		if (is_root_usable(&root, new_pgd, new_role))
4041 4042 4043 4044
			break;
	}

	mmu->root_hpa = root.hpa;
4045
	mmu->root_pgd = root.pgd;
4046 4047 4048 4049

	return i < KVM_MMU_NUM_PREV_ROOTS;
}

4050
static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4051
			    union kvm_mmu_page_role new_role)
A
Avi Kivity 已提交
4052
{
4053
	struct kvm_mmu *mmu = vcpu->arch.mmu;
4054 4055 4056 4057 4058 4059 4060

	/*
	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
	 * later if necessary.
	 */
	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
4061
	    mmu->root_level >= PT64_ROOT_4LEVEL)
4062
		return cached_root_available(vcpu, new_pgd, new_role);
4063 4064

	return false;
A
Avi Kivity 已提交
4065 4066
}

4067
static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
4068
			      union kvm_mmu_page_role new_role)
A
Avi Kivity 已提交
4069
{
4070
	if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082
		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
		return;
	}

	/*
	 * It's possible that the cached previous root page is obsolete because
	 * of a change in the MMU generation number. However, changing the
	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
	 * free the root set here and allocate a new one.
	 */
	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);

4083
	if (force_flush_and_sync_on_reuse) {
4084 4085
		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
4086
	}
4087 4088 4089 4090 4091 4092 4093 4094 4095

	/*
	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
	 * switching to a new CR3, that GVA->GPA mapping may no longer be
	 * valid. So clear any cached MMIO info even when we don't need to sync
	 * the shadow page tables.
	 */
	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);

4096 4097 4098 4099 4100 4101 4102
	/*
	 * If this is a direct root page, it doesn't have a write flooding
	 * count. Otherwise, clear the write flooding count.
	 */
	if (!new_role.direct)
		__clear_sp_write_flooding_count(
				to_shadow_page(vcpu->arch.mmu->root_hpa));
A
Avi Kivity 已提交
4103 4104
}

4105
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
4106
{
4107
	__kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
4108
}
4109
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
4110

4111 4112
static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
4113
	return kvm_read_cr3(vcpu);
4114 4115
}

4116
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4117
			   unsigned int access)
4118 4119 4120 4121 4122 4123 4124
{
	if (unlikely(is_mmio_spte(*sptep))) {
		if (gfn != get_mmio_spte_gfn(*sptep)) {
			mmu_spte_clear_no_track(sptep);
			return true;
		}

4125
		mark_mmio_spte(vcpu, sptep, gfn, access);
4126 4127 4128 4129 4130 4131
		return true;
	}

	return false;
}

4132 4133 4134 4135 4136
#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE

A
Avi Kivity 已提交
4137 4138 4139 4140 4141 4142 4143 4144
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

4145
static void
4146
__reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
4147
			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
4148
			bool pse, bool amd)
4149
{
4150
	u64 gbpages_bit_rsvd = 0;
4151
	u64 nonleaf_bit8_rsvd = 0;
4152
	u64 high_bits_rsvd;
4153

4154
	rsvd_check->bad_mt_xwr = 0;
4155

4156
	if (!gbpages)
4157
		gbpages_bit_rsvd = rsvd_bits(7, 7);
4158

4159 4160 4161 4162 4163 4164 4165 4166 4167
	if (level == PT32E_ROOT_LEVEL)
		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 62);
	else
		high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);

	/* Note, NX doesn't exist in PDPTEs, this is handled below. */
	if (!nx)
		high_bits_rsvd |= rsvd_bits(63, 63);

4168 4169 4170 4171
	/*
	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
	 * leaf entries) on AMD CPUs only.
	 */
4172
	if (amd)
4173 4174
		nonleaf_bit8_rsvd = rsvd_bits(8, 8);

4175
	switch (level) {
4176 4177
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
4178 4179 4180 4181
		rsvd_check->rsvd_bits_mask[0][1] = 0;
		rsvd_check->rsvd_bits_mask[0][0] = 0;
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4182

4183
		if (!pse) {
4184
			rsvd_check->rsvd_bits_mask[1][1] = 0;
4185 4186 4187
			break;
		}

4188 4189
		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
4190
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4191 4192
		else
			/* 32 bits PSE 4MB page */
4193
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4194 4195
		break;
	case PT32E_ROOT_LEVEL:
4196 4197 4198 4199 4200 4201 4202 4203
		rsvd_check->rsvd_bits_mask[0][2] = rsvd_bits(63, 63) |
						   high_bits_rsvd |
						   rsvd_bits(5, 8) |
						   rsvd_bits(1, 2);	/* PDPTE */
		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;	/* PDE */
		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;	/* PTE */
		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
						   rsvd_bits(13, 20);	/* large page */
4204 4205
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4206
		break;
4207
	case PT64_ROOT_5LEVEL:
4208 4209 4210
		rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd |
						   nonleaf_bit8_rsvd |
						   rsvd_bits(7, 7);
4211 4212
		rsvd_check->rsvd_bits_mask[1][4] =
			rsvd_check->rsvd_bits_mask[0][4];
4213
		fallthrough;
4214
	case PT64_ROOT_4LEVEL:
4215 4216 4217 4218 4219 4220 4221
		rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd |
						   nonleaf_bit8_rsvd |
						   rsvd_bits(7, 7);
		rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd |
						   gbpages_bit_rsvd;
		rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd;
		rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4222 4223
		rsvd_check->rsvd_bits_mask[1][3] =
			rsvd_check->rsvd_bits_mask[0][3];
4224 4225 4226 4227 4228
		rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd |
						   gbpages_bit_rsvd |
						   rsvd_bits(13, 29);
		rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd |
						   rsvd_bits(13, 20); /* large page */
4229 4230
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
4231 4232 4233 4234
		break;
	}
}

4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249
static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
{
	/*
	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
	 * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
	 * walk for performance and complexity reasons.  Not to mention KVM
	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
	 * KVM once a TDP translation is installed.  Mimic hardware behavior so
	 * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
	 */
	return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
			     guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
}

4250 4251 4252
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
4253
	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
4254
				vcpu->arch.reserved_gpa_bits,
4255
				context->root_level, is_efer_nx(context),
4256
				guest_can_use_gbpages(vcpu),
4257
				is_cr4_pse(context),
4258
				guest_cpuid_is_amd_or_hygon(vcpu));
4259 4260
}

4261 4262
static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
4263
			    u64 pa_bits_rsvd, bool execonly)
4264
{
4265
	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
4266
	u64 bad_mt_xwr;
4267

4268 4269 4270 4271 4272
	rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
	rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
	rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
4273 4274

	/* large page */
4275
	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4276
	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4277 4278
	rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
	rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
4279
	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4280

4281 4282 4283 4284 4285 4286 4287 4288
	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
	if (!execonly) {
		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4289
	}
4290
	rsvd_check->bad_mt_xwr = bad_mt_xwr;
4291 4292
}

4293 4294 4295 4296
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
		struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4297
				    vcpu->arch.reserved_gpa_bits, execonly);
4298 4299
}

4300 4301 4302 4303 4304
static inline u64 reserved_hpa_bits(void)
{
	return rsvd_bits(shadow_phys_bits, 63);
}

4305 4306 4307 4308 4309
/*
 * the page table on host is the shadow page table for the page
 * table in guest or amd nested guest, its mmu features completely
 * follow the features in guest.
 */
4310 4311
static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
					struct kvm_mmu *context)
4312
{
4313 4314 4315 4316 4317 4318 4319 4320
	/*
	 * KVM uses NX when TDP is disabled to handle a variety of scenarios,
	 * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
	 * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
	 * The iTLB multi-hit workaround can be toggled at any time, so assume
	 * NX can be used by any non-nested shadow MMU to avoid having to reset
	 * MMU contexts.  Note, KVM forces EFER.NX=1 when TDP is disabled.
	 */
4321
	bool uses_nx = is_efer_nx(context) || !tdp_enabled;
4322 4323 4324 4325 4326

	/* @amd adds a check on bit of SPTEs, which KVM shouldn't use anyways. */
	bool is_amd = true;
	/* KVM doesn't use 2-level page tables for the shadow MMU. */
	bool is_pse = false;
4327 4328
	struct rsvd_bits_validate *shadow_zero_check;
	int i;
4329

4330 4331
	WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);

4332
	shadow_zero_check = &context->shadow_zero_check;
4333
	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4334
				context->shadow_root_level, uses_nx,
4335
				guest_can_use_gbpages(vcpu), is_pse, is_amd);
4336 4337 4338 4339 4340 4341 4342 4343 4344

	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}

4345 4346
}

4347 4348 4349 4350 4351 4352
static inline bool boot_cpu_is_amd(void)
{
	WARN_ON_ONCE(!tdp_enabled);
	return shadow_x_mask == 0;
}

4353 4354 4355 4356 4357 4358 4359 4360
/*
 * the direct page table on host, use as much mmu features as
 * possible, however, kvm currently does not do execution-protection.
 */
static void
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context)
{
4361 4362 4363 4364 4365
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	shadow_zero_check = &context->shadow_zero_check;

4366
	if (boot_cpu_is_amd())
4367
		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
4368
					context->shadow_root_level, false,
4369
					boot_cpu_has(X86_FEATURE_GBPAGES),
4370
					false, true);
4371
	else
4372
		__reset_rsvds_bits_mask_ept(shadow_zero_check,
4373
					    reserved_hpa_bits(), false);
4374

4375 4376 4377 4378 4379 4380 4381
	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}
4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392
}

/*
 * as the comments in reset_shadow_zero_bits_mask() except it
 * is the shadow page table for intel nested guest.
 */
static void
reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
4393
				    reserved_hpa_bits(), execonly);
4394 4395
}

4396 4397 4398 4399 4400 4401 4402 4403 4404 4405
#define BYTE_MASK(access) \
	((1 & (access) ? 2 : 0) | \
	 (2 & (access) ? 4 : 0) | \
	 (3 & (access) ? 8 : 0) | \
	 (4 & (access) ? 16 : 0) | \
	 (5 & (access) ? 32 : 0) | \
	 (6 & (access) ? 64 : 0) | \
	 (7 & (access) ? 128 : 0))


4406
static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
4407
{
4408 4409 4410 4411 4412 4413
	unsigned byte;

	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
	const u8 u = BYTE_MASK(ACC_USER_MASK);

4414 4415 4416
	bool cr4_smep = is_cr4_smep(mmu);
	bool cr4_smap = is_cr4_smap(mmu);
	bool cr0_wp = is_cr0_wp(mmu);
4417
	bool efer_nx = is_efer_nx(mmu);
4418 4419

	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4420 4421
		unsigned pfec = byte << 1;

F
Feng Wu 已提交
4422
		/*
4423 4424
		 * Each "*f" variable has a 1 bit for each UWX value
		 * that causes a fault with the given PFEC.
F
Feng Wu 已提交
4425
		 */
4426

4427
		/* Faults from writes to non-writable pages */
4428
		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
4429
		/* Faults from user mode accesses to supervisor pages */
4430
		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
4431
		/* Faults from fetches of non-executable pages*/
4432
		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
4433 4434 4435 4436 4437 4438 4439 4440 4441 4442
		/* Faults from kernel mode fetches of user pages */
		u8 smepf = 0;
		/* Faults from kernel mode accesses of user pages */
		u8 smapf = 0;

		if (!ept) {
			/* Faults from kernel mode accesses to user pages */
			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;

			/* Not really needed: !nx will cause pte.nx to fault */
4443
			if (!efer_nx)
4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457
				ff = 0;

			/* Allow supervisor writes if !cr0.wp */
			if (!cr0_wp)
				wf = (pfec & PFERR_USER_MASK) ? wf : 0;

			/* Disallow supervisor fetches of user code if cr4.smep */
			if (cr4_smep)
				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;

			/*
			 * SMAP:kernel-mode data accesses from user-mode
			 * mappings should fault. A fault is considered
			 * as a SMAP violation if all of the following
P
Peng Hao 已提交
4458
			 * conditions are true:
4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471
			 *   - X86_CR4_SMAP is set in CR4
			 *   - A user page is accessed
			 *   - The access is not a fetch
			 *   - Page fault in kernel mode
			 *   - if CPL = 3 or X86_EFLAGS_AC is clear
			 *
			 * Here, we cover the first three conditions.
			 * The fourth is computed dynamically in permission_fault();
			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
			 * *not* subject to SMAP restrictions.
			 */
			if (cr4_smap)
				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4472
		}
4473 4474

		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4475 4476 4477
	}
}

4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501
/*
* PKU is an additional mechanism by which the paging controls access to
* user-mode addresses based on the value in the PKRU register.  Protection
* key violations are reported through a bit in the page fault error code.
* Unlike other bits of the error code, the PK bit is not known at the
* call site of e.g. gva_to_gpa; it must be computed directly in
* permission_fault based on two bits of PKRU, on some machine state (CR4,
* CR0, EFER, CPL), and on other bits of the error code and the page tables.
*
* In particular the following conditions come from the error code, the
* page tables and the machine state:
* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
* - PK is always zero if U=0 in the page tables
* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
*
* The PKRU bitmask caches the result of these four conditions.  The error
* code (minus the P bit) and the page table's U bit form an index into the
* PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
* with the two bits of the PKRU register corresponding to the protection key.
* For the first three conditions above the bits will be 00, thus masking
* away both AD and WD.  For all reads or if the last condition holds, WD
* only will be masked away.
*/
4502
static void update_pkru_bitmask(struct kvm_mmu *mmu)
4503 4504 4505 4506
{
	unsigned bit;
	bool wp;

4507
	if (!is_cr4_pke(mmu)) {
4508 4509 4510 4511
		mmu->pkru_mask = 0;
		return;
	}

4512
	wp = is_cr0_wp(mmu);
4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545

	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
		unsigned pfec, pkey_bits;
		bool check_pkey, check_write, ff, uf, wf, pte_user;

		pfec = bit << 1;
		ff = pfec & PFERR_FETCH_MASK;
		uf = pfec & PFERR_USER_MASK;
		wf = pfec & PFERR_WRITE_MASK;

		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
		pte_user = pfec & PFERR_RSVD_MASK;

		/*
		 * Only need to check the access which is not an
		 * instruction fetch and is to a user page.
		 */
		check_pkey = (!ff && pte_user);
		/*
		 * write access is controlled by PKRU if it is a
		 * user access or CR0.WP = 1.
		 */
		check_write = check_pkey && wf && (uf || wp);

		/* PKRU.AD stops both read and write access. */
		pkey_bits = !!check_pkey;
		/* PKRU.WD stops write access. */
		pkey_bits |= (!!check_write) << 1;

		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
	}
}

4546 4547
static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
					struct kvm_mmu *mmu)
A
Avi Kivity 已提交
4548
{
4549 4550
	if (!is_cr0_pg(mmu))
		return;
4551

4552 4553 4554
	reset_rsvds_bits_mask(vcpu, mmu);
	update_permission_bitmask(mmu, false);
	update_pkru_bitmask(mmu);
A
Avi Kivity 已提交
4555 4556
}

4557
static void paging64_init_context(struct kvm_mmu *context)
A
Avi Kivity 已提交
4558 4559 4560
{
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
4561
	context->sync_page = paging64_sync_page;
M
Marcelo Tosatti 已提交
4562
	context->invlpg = paging64_invlpg;
4563
	context->direct_map = false;
A
Avi Kivity 已提交
4564 4565
}

4566
static void paging32_init_context(struct kvm_mmu *context)
A
Avi Kivity 已提交
4567 4568 4569
{
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
4570
	context->sync_page = paging32_sync_page;
M
Marcelo Tosatti 已提交
4571
	context->invlpg = paging32_invlpg;
4572
	context->direct_map = false;
A
Avi Kivity 已提交
4573 4574
}

4575 4576
static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
							 struct kvm_mmu_role_regs *regs)
4577 4578 4579
{
	union kvm_mmu_extended_role ext = {0};

4580 4581 4582 4583 4584 4585
	if (____is_cr0_pg(regs)) {
		ext.cr0_pg = 1;
		ext.cr4_pae = ____is_cr4_pae(regs);
		ext.cr4_smep = ____is_cr4_smep(regs);
		ext.cr4_smap = ____is_cr4_smap(regs);
		ext.cr4_pse = ____is_cr4_pse(regs);
4586 4587 4588 4589

		/* PKEY and LA57 are active iff long mode is active. */
		ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
		ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
4590
	}
4591 4592 4593 4594 4595 4596

	ext.valid = 1;

	return ext;
}

4597
static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
4598
						   struct kvm_mmu_role_regs *regs,
4599 4600 4601 4602 4603
						   bool base_only)
{
	union kvm_mmu_role role = {0};

	role.base.access = ACC_ALL;
4604 4605 4606 4607
	if (____is_cr0_pg(regs)) {
		role.base.efer_nx = ____is_efer_nx(regs);
		role.base.cr0_wp = ____is_cr0_wp(regs);
	}
4608 4609 4610 4611 4612 4613
	role.base.smm = is_smm(vcpu);
	role.base.guest_mode = is_guest_mode(vcpu);

	if (base_only)
		return role;

4614
	role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
4615 4616 4617 4618

	return role;
}

4619 4620
static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{
4621 4622 4623 4624
	/* tdp_root_level is architecture forced level, use it if nonzero */
	if (tdp_root_level)
		return tdp_root_level;

4625
	/* Use 5-level TDP if and only if it's useful/necessary. */
4626
	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
4627 4628
		return 4;

4629
	return max_tdp_level;
4630 4631
}

4632
static union kvm_mmu_role
4633 4634
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
				struct kvm_mmu_role_regs *regs, bool base_only)
4635
{
4636
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4637

4638
	role.base.ad_disabled = (shadow_accessed_mask == 0);
4639
	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4640
	role.base.direct = true;
4641
	role.base.gpte_is_8_bytes = true;
4642 4643 4644 4645

	return role;
}

4646
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4647
{
4648
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4649
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4650
	union kvm_mmu_role new_role =
4651
		kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
4652

4653 4654 4655 4656
	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->mmu_role.as_u64 = new_role.as_u64;
4657
	context->page_fault = kvm_tdp_page_fault;
4658
	context->sync_page = nonpaging_sync_page;
4659
	context->invlpg = NULL;
4660
	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4661
	context->direct_map = true;
4662
	context->get_guest_pgd = get_cr3;
4663
	context->get_pdptr = kvm_pdptr_read;
4664
	context->inject_page_fault = kvm_inject_page_fault;
4665
	context->root_level = role_regs_to_root_level(&regs);
4666

4667
	if (!is_cr0_pg(context))
4668
		context->gva_to_gpa = nonpaging_gva_to_gpa;
4669
	else if (is_cr4_pae(context))
4670
		context->gva_to_gpa = paging64_gva_to_gpa;
4671
	else
4672
		context->gva_to_gpa = paging32_gva_to_gpa;
4673

4674
	reset_guest_paging_metadata(vcpu, context);
4675
	reset_tdp_shadow_zero_bits_mask(vcpu, context);
4676 4677
}

4678
static union kvm_mmu_role
4679 4680
kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
				      struct kvm_mmu_role_regs *regs, bool base_only)
4681
{
4682
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
4683

4684 4685
	role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
	role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
4686
	role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
4687

4688 4689 4690 4691
	return role;
}

static union kvm_mmu_role
4692 4693
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
				   struct kvm_mmu_role_regs *regs, bool base_only)
4694 4695
{
	union kvm_mmu_role role =
4696
		kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
4697

4698
	role.base.direct = !____is_cr0_pg(regs);
4699

4700
	if (!____is_efer_lma(regs))
4701
		role.base.level = PT32E_ROOT_LEVEL;
4702
	else if (____is_cr4_la57(regs))
4703
		role.base.level = PT64_ROOT_5LEVEL;
4704
	else
4705
		role.base.level = PT64_ROOT_4LEVEL;
4706 4707 4708 4709

	return role;
}

4710
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4711
				    struct kvm_mmu_role_regs *regs,
4712
				    union kvm_mmu_role new_role)
4713
{
4714 4715
	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;
4716

4717
	context->mmu_role.as_u64 = new_role.as_u64;
4718

4719
	if (!is_cr0_pg(context))
4720
		nonpaging_init_context(context);
4721
	else if (is_cr4_pae(context))
4722
		paging64_init_context(context);
A
Avi Kivity 已提交
4723
	else
4724
		paging32_init_context(context);
4725
	context->root_level = role_regs_to_root_level(regs);
4726

4727
	reset_guest_paging_metadata(vcpu, context);
4728 4729
	context->shadow_root_level = new_role.base.level;

4730
	reset_shadow_zero_bits_mask(vcpu, context);
4731
}
4732

4733 4734
static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
				struct kvm_mmu_role_regs *regs)
4735
{
4736
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4737
	union kvm_mmu_role new_role =
4738
		kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
4739

4740
	shadow_mmu_init_context(vcpu, context, regs, new_role);
4741 4742
}

4743
static union kvm_mmu_role
4744 4745
kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
				   struct kvm_mmu_role_regs *regs)
4746 4747
{
	union kvm_mmu_role role =
4748
		kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4749 4750

	role.base.direct = false;
4751
	role.base.level = kvm_mmu_get_tdp_level(vcpu);
4752 4753 4754 4755

	return role;
}

4756 4757
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
4758
{
4759
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4760 4761 4762 4763 4764
	struct kvm_mmu_role_regs regs = {
		.cr0 = cr0,
		.cr4 = cr4,
		.efer = efer,
	};
4765
	union kvm_mmu_role new_role;
4766

4767
	new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
4768

4769
	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
4770

4771
	shadow_mmu_init_context(vcpu, context, &regs, new_role);
4772 4773
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
4774

4775 4776
static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4777
				   bool execonly, u8 level)
4778
{
4779
	union kvm_mmu_role role = {0};
4780

4781 4782
	/* SMM flag is inherited from root_mmu */
	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4783

4784
	role.base.level = level;
4785
	role.base.gpte_is_8_bytes = true;
4786 4787 4788 4789
	role.base.direct = false;
	role.base.ad_disabled = !accessed_dirty;
	role.base.guest_mode = true;
	role.base.access = ACC_ALL;
4790

4791 4792
	/* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
	role.ext.word = 0;
4793
	role.ext.execonly = execonly;
4794
	role.ext.valid = 1;
4795 4796 4797 4798

	return role;
}

4799
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4800
			     bool accessed_dirty, gpa_t new_eptp)
N
Nadav Har'El 已提交
4801
{
4802
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4803
	u8 level = vmx_eptp_page_walk_level(new_eptp);
4804 4805
	union kvm_mmu_role new_role =
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
4806
						   execonly, level);
4807

4808
	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
4809 4810 4811

	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;
4812

4813 4814
	context->mmu_role.as_u64 = new_role.as_u64;

4815
	context->shadow_root_level = level;
N
Nadav Har'El 已提交
4816

4817
	context->ept_ad = accessed_dirty;
N
Nadav Har'El 已提交
4818 4819 4820 4821
	context->page_fault = ept_page_fault;
	context->gva_to_gpa = ept_gva_to_gpa;
	context->sync_page = ept_sync_page;
	context->invlpg = ept_invlpg;
4822
	context->root_level = level;
N
Nadav Har'El 已提交
4823
	context->direct_map = false;
4824

4825
	update_permission_bitmask(context, true);
4826
	update_pkru_bitmask(context);
N
Nadav Har'El 已提交
4827
	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4828
	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
N
Nadav Har'El 已提交
4829 4830 4831
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

4832
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4833
{
4834
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
4835
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4836

4837
	kvm_init_shadow_mmu(vcpu, &regs);
4838

4839
	context->get_guest_pgd     = get_cr3;
4840 4841
	context->get_pdptr         = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
A
Avi Kivity 已提交
4842 4843
}

4844 4845
static union kvm_mmu_role
kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
4846
{
4847 4848 4849
	union kvm_mmu_role role;

	role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
4850 4851 4852 4853 4854 4855 4856

	/*
	 * Nested MMUs are used only for walking L2's gva->gpa, they never have
	 * shadow pages of their own and so "direct" has no meaning.   Set it
	 * to "true" to try to detect bogus usage of the nested MMU.
	 */
	role.base.direct = true;
4857
	role.base.level = role_regs_to_root_level(regs);
4858 4859 4860
	return role;
}

4861
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4862
{
4863 4864
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
4865 4866
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

4867 4868 4869 4870
	if (new_role.as_u64 == g_context->mmu_role.as_u64)
		return;

	g_context->mmu_role.as_u64 = new_role.as_u64;
4871
	g_context->get_guest_pgd     = get_cr3;
4872
	g_context->get_pdptr         = kvm_pdptr_read;
4873
	g_context->inject_page_fault = kvm_inject_page_fault;
4874
	g_context->root_level        = new_role.base.level;
4875

4876 4877 4878 4879 4880 4881
	/*
	 * L2 page tables are never shadowed, so there is no need to sync
	 * SPTEs.
	 */
	g_context->invlpg            = NULL;

4882
	/*
4883
	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4884 4885 4886 4887 4888
	 * L1's nested page tables (e.g. EPT12). The nested translation
	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
	 * L2's page tables as the first level of translation and L1's
	 * nested page tables as the second level of translation. Basically
	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4889
	 */
4890
	if (!is_paging(vcpu))
4891
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4892
	else if (is_long_mode(vcpu))
4893
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4894
	else if (is_pae(vcpu))
4895
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4896
	else
4897 4898
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;

4899
	reset_guest_paging_metadata(vcpu, g_context);
4900 4901
}

4902
void kvm_init_mmu(struct kvm_vcpu *vcpu)
4903
{
4904
	if (mmu_is_nested(vcpu))
4905
		init_kvm_nested_mmu(vcpu);
4906
	else if (tdp_enabled)
4907
		init_kvm_tdp_mmu(vcpu);
4908
	else
4909
		init_kvm_softmmu(vcpu);
4910
}
4911
EXPORT_SYMBOL_GPL(kvm_init_mmu);
4912

4913 4914 4915
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
4916
	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
4917 4918
	union kvm_mmu_role role;

4919
	if (tdp_enabled)
4920
		role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
4921
	else
4922
		role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
4923 4924

	return role.base;
4925
}
4926

4927 4928 4929 4930 4931 4932 4933 4934 4935 4936
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	/*
	 * Invalidate all MMU roles to force them to reinitialize as CPUID
	 * information is factored into reserved bit calculations.
	 */
	vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
	vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
	vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
	kvm_mmu_reset_context(vcpu);
4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956

	/*
	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
	 * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
	 * sweep the problem under the rug.
	 *
	 * KVM's horrific CPUID ABI makes the problem all but impossible to
	 * solve, as correctly handling multiple vCPU models (with respect to
	 * paging and physical address properties) in a single VM would require
	 * tracking all relevant CPUID information in kvm_mmu_page_role.  That
	 * is very undesirable as it would double the memory requirements for
	 * gfn_track (see struct kvm_mmu_page_role comments), and in practice
	 * no sane VMM mucks with the core vCPU model on the fly.
	 */
	if (vcpu->arch.last_vmentry_cpu != -1) {
		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
		pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
	}
4957 4958
}

4959
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
4960
{
4961
	kvm_mmu_unload(vcpu);
4962
	kvm_init_mmu(vcpu);
A
Avi Kivity 已提交
4963
}
4964
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
A
Avi Kivity 已提交
4965 4966

int kvm_mmu_load(struct kvm_vcpu *vcpu)
A
Avi Kivity 已提交
4967
{
4968 4969
	int r;

4970
	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
A
Avi Kivity 已提交
4971 4972
	if (r)
		goto out;
4973
	r = mmu_alloc_special_roots(vcpu);
A
Avi Kivity 已提交
4974 4975
	if (r)
		goto out;
4976
	if (vcpu->arch.mmu->direct_map)
4977 4978 4979
		r = mmu_alloc_direct_roots(vcpu);
	else
		r = mmu_alloc_shadow_roots(vcpu);
4980 4981
	if (r)
		goto out;
4982 4983 4984

	kvm_mmu_sync_roots(vcpu);

4985
	kvm_mmu_load_pgd(vcpu);
4986
	static_call(kvm_x86_tlb_flush_current)(vcpu);
4987 4988
out:
	return r;
A
Avi Kivity 已提交
4989
}
A
Avi Kivity 已提交
4990 4991 4992

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
4993 4994 4995 4996
	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
A
Avi Kivity 已提交
4997
}
A
Avi Kivity 已提交
4998

4999 5000 5001 5002 5003 5004 5005 5006
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
5007 5008
	old ^= shadow_nx_mask;
	new ^= shadow_nx_mask;
5009 5010 5011
	return (old & ~new & PT64_PERM_MASK) != 0;
}

5012
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
5013
				    int *bytes)
5014
{
5015
	u64 gentry = 0;
5016
	int r;
5017 5018 5019

	/*
	 * Assume that the pte write on a page table of the same type
5020 5021
	 * as the current vcpu paging mode since we update the sptes only
	 * when they have the same mode.
5022
	 */
5023
	if (is_pae(vcpu) && *bytes == 4) {
5024
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
5025 5026
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
5027 5028
	}

5029 5030 5031 5032
	if (*bytes == 4 || *bytes == 8) {
		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
		if (r)
			gentry = 0;
5033 5034
	}

5035 5036 5037 5038 5039 5040 5041
	return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
5042
static bool detect_write_flooding(struct kvm_mmu_page *sp)
5043
{
5044 5045 5046 5047
	/*
	 * Skip write-flooding detected for the sp whose level is 1, because
	 * it can become unsync, then the guest page is not write-protected.
	 */
5048
	if (sp->role.level == PG_LEVEL_4K)
5049
		return false;
5050

5051 5052
	atomic_inc(&sp->write_flooding_count);
	return atomic_read(&sp->write_flooding_count) >= 3;
5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067
}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{
	unsigned offset, pte_size, misaligned;

	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
		 gpa, bytes, sp->role.word);

	offset = offset_in_page(gpa);
5068
	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5069 5070 5071 5072 5073 5074 5075 5076

	/*
	 * Sometimes, the OS only writes the last one bytes to update status
	 * bits, for example, in linux, andb instruction is used in clear_bit().
	 */
	if (!(offset & (pte_size - 1)) && bytes == 1)
		return false;

5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091
	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
	misaligned |= bytes < 4;

	return misaligned;
}

static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
	unsigned page_offset, quadrant;
	u64 *spte;
	int level;

	page_offset = offset_in_page(gpa);
	level = sp->role.level;
	*nspte = 1;
5092
	if (!sp->role.gpte_is_8_bytes) {
5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113
		page_offset <<= 1;	/* 32->64 */
		/*
		 * A 32-bit pde maps 4MB while the shadow pdes map
		 * only 2MB.  So we need to double the offset again
		 * and zap two pdes instead of one.
		 */
		if (level == PT32_ROOT_LEVEL) {
			page_offset &= ~7; /* kill rounding error */
			page_offset <<= 1;
			*nspte = 2;
		}
		quadrant = page_offset >> PAGE_SHIFT;
		page_offset &= ~PAGE_MASK;
		if (quadrant != sp->role.quadrant)
			return NULL;
	}

	spte = &sp->spt[page_offset / sizeof(*spte)];
	return spte;
}

5114
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
5115 5116
			      const u8 *new, int bytes,
			      struct kvm_page_track_notifier_node *node)
5117 5118 5119 5120 5121 5122
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	int npte;
5123
	bool flush = false;
5124 5125 5126 5127 5128

	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can exit simply.
	 */
5129
	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5130 5131 5132 5133 5134 5135
		return;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	/*
	 * No need to care whether allocation memory is successful
I
Ingo Molnar 已提交
5136
	 * or not since pte prefetch is skipped if it does not have
5137 5138
	 * enough objects in the cache.
	 */
5139
	mmu_topup_memory_caches(vcpu, true);
5140

5141
	write_lock(&vcpu->kvm->mmu_lock);
5142 5143 5144

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);

5145
	++vcpu->kvm->stat.mmu_pte_write;
5146
	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
5147

5148
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5149
		if (detect_write_misaligned(sp, gpa, bytes) ||
5150
		      detect_write_flooding(sp)) {
5151
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
A
Avi Kivity 已提交
5152
			++vcpu->kvm->stat.mmu_flooded;
5153 5154
			continue;
		}
5155 5156 5157 5158 5159

		spte = get_written_sptes(sp, gpa, &npte);
		if (!spte)
			continue;

5160
		while (npte--) {
5161
			entry = *spte;
5162
			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5163 5164
			if (gentry && sp->role.level != PG_LEVEL_4K)
				++vcpu->kvm->stat.mmu_pde_zapped;
G
Gleb Natapov 已提交
5165
			if (need_remote_flush(entry, *spte))
5166
				flush = true;
5167
			++spte;
5168 5169
		}
	}
5170
	kvm_mmu_remote_flush_or_zap(vcpu->kvm, &invalid_list, flush);
5171
	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
5172
	write_unlock(&vcpu->kvm->mmu_lock);
5173 5174
}

5175
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
5176
		       void *insn, int insn_len)
5177
{
5178
	int r, emulation_type = EMULTYPE_PF;
5179
	bool direct = vcpu->arch.mmu->direct_map;
5180

5181
	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5182 5183
		return RET_PF_RETRY;

5184
	r = RET_PF_INVALID;
5185
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
5186
		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
5187
		if (r == RET_PF_EMULATE)
5188 5189
			goto emulate;
	}
5190

5191
	if (r == RET_PF_INVALID) {
5192 5193
		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
					  lower_32_bits(error_code), false);
5194
		if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm))
5195
			return -EIO;
5196 5197
	}

5198
	if (r < 0)
5199
		return r;
5200 5201
	if (r != RET_PF_EMULATE)
		return 1;
5202

5203 5204 5205 5206 5207 5208 5209
	/*
	 * Before emulating the instruction, check if the error code
	 * was due to a RO violation while translating the guest page.
	 * This can occur when using nested virtualization with nested
	 * paging in both guests. If true, we simply unprotect the page
	 * and resume the guest.
	 */
5210
	if (vcpu->arch.mmu->direct_map &&
5211
	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
5212
		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
5213 5214 5215
		return 1;
	}

5216 5217 5218 5219 5220 5221
	/*
	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
	 * optimistically try to just unprotect the page and let the processor
	 * re-execute the instruction that caused the page fault.  Do not allow
	 * retrying MMIO emulation, as it's not only pointless but could also
	 * cause us to enter an infinite loop because the processor will keep
5222 5223 5224 5225
	 * faulting on the non-existent MMIO address.  Retrying an instruction
	 * from a nested guest is also pointless and dangerous as we are only
	 * explicitly shadowing L1's page tables, i.e. unprotecting something
	 * for L1 isn't going to magically fix whatever issue cause L2 to fail.
5226
	 */
5227
	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
5228
		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
5229
emulate:
5230
	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
5231
				       insn_len);
5232 5233 5234
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gva_t gva, hpa_t root_hpa)
{
	int i;

	/* It's actually a GPA for vcpu->arch.guest_mmu.  */
	if (mmu != &vcpu->arch.guest_mmu) {
		/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
		if (is_noncanonical_address(gva, vcpu))
			return;

		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);
	}

	if (!mmu->invlpg)
		return;

	if (root_hpa == INVALID_PAGE) {
		mmu->invlpg(vcpu, gva, mmu->root_hpa);

		/*
		 * INVLPG is required to invalidate any global mappings for the VA,
		 * irrespective of PCID.  Since it would take roughly the same amount
		 * of work to determine whether any of the prev_root mappings of the
		 * VA is marked global as it would to just sync it blindly, simply
		 * always sync it.
		 *
		 * Mappings not reachable via the current cr3 or the prev_roots will be
		 * synced when switching to that cr3, so nothing needs to be done here
		 * for them.
		 */
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if (VALID_PAGE(mmu->prev_roots[i].hpa))
				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
	} else {
		mmu->invlpg(vcpu, gva, root_hpa);
	}
}

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

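/*
 * Handle a single-address INVPCID-style invalidation: sync the GVA in the
 * current root if the requested PCID is the active one, and in any cached
 * prev_roots whose PCID matches, then perform the hardware TLB invalidation
 * for the GVA.
 */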
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	bool tlb_flush = false;
	uint i;

	if (pcid == kvm_get_active_pcid(vcpu)) {
		mmu->invlpg(vcpu, gva, mmu->root_hpa);
		tlb_flush = true;
	}

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
			tlb_flush = true;
		}
	}

	if (tlb_flush)
		static_call(kvm_x86_tlb_flush_gva)(vcpu, gva);

	++vcpu->stat.invlpg;

	/*
	 * Mappings not reachable via the current cr3 or the prev_roots will be
	 * synced when switching to that cr3, so nothing needs to be done here
	 * for them.
	 */
}

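/*
 * Record the TDP configuration handed in by the vendor module (typically VMX
 * or SVM during hardware setup) and derive the largest huge page size the
 * MMU is allowed to create.
 */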
void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
		       int tdp_max_root_level, int tdp_huge_page_level)
{
	tdp_enabled = enable_tdp;
	tdp_root_level = tdp_forced_root_level;
	max_tdp_level = tdp_max_root_level;

	/*
	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
	 * the kernel is not.  But, KVM never creates a page size greater than
	 * what is used by the kernel for any given HVA, i.e. the kernel's
	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
	 */
	if (tdp_enabled)
		max_huge_page_level = tdp_huge_page_level;
	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
		max_huge_page_level = PG_LEVEL_1G;
	else
		max_huge_page_level = PG_LEVEL_2M;
}
EXPORT_SYMBOL_GPL(kvm_configure_mmu);

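/*
 * The slot_handle_*() helpers below walk a memslot's rmaps over a range of
 * GFNs and page-table levels, applying 'fn' to every non-empty rmap.  The
 * walk periodically yields the write-locked mmu_lock; when flush_on_yield is
 * set, any pending TLB flush for the already-processed range is performed
 * before yielding so that it cannot be lost across the lock break.
 */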
/* The return value indicates whether a TLB flush on all vCPUs is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head,
				    const struct kvm_memory_slot *slot);

/* The caller must hold mmu_lock (for write) before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,
			slot_level_handler fn, int start_level, int end_level,
			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
			bool flush)
{
	struct slot_rmap_walk_iterator iterator;

	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
			end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap, memslot);

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			if (flush && flush_on_yield) {
				kvm_flush_remote_tlbs_with_address(kvm,
						start_gfn,
						iterator.gfn - start_gfn + 1);
				flush = false;
			}
			cond_resched_rwlock_write(&kvm->mmu_lock);
		}
	}

	return flush;
}

static __always_inline bool
slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool flush_on_yield)
{
	return slot_handle_level_range(kvm, memslot, fn, start_level,
			end_level, memslot->base_gfn,
			memslot->base_gfn + memslot->npages - 1,
			flush_on_yield, false);
}

static __always_inline bool
slot_handle_leaf(struct kvm *kvm, const struct kvm_memory_slot *memslot,
		 slot_level_handler fn, bool flush_on_yield)
{
	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
				 PG_LEVEL_4K, flush_on_yield);
}

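/*
 * Free the special root pages.  With shadow paging the pae_root page was
 * decrypted at allocation time (see __kvm_mmu_create()), so re-encrypt it
 * before handing it back to the page allocator.
 */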
static void free_mmu_pages(struct kvm_mmu *mmu)
{
	if (!tdp_enabled && mmu->pae_root)
		set_memory_encrypted((unsigned long)mmu->pae_root, 1);
	free_page((unsigned long)mmu->pae_root);
	free_page((unsigned long)mmu->pml4_root);
	free_page((unsigned long)mmu->pml5_root);
}

static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	struct page *page;
	int i;

	mmu->root_hpa = INVALID_PAGE;
	mmu->root_pgd = 0;
	mmu->translate_gpa = translate_gpa;
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;

	/*
	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
	 * while the PDP table is a per-vCPU construct that's allocated at MMU
	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
	 * x86_64.  Therefore we need to allocate the PDP table in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
	 * generally doesn't use PAE paging and can skip allocating the PDP
	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
	 */
	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
		return 0;

	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	mmu->pae_root = page_address(page);

	/*
	 * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
	 * get the CPU to treat the PDPTEs as encrypted.  Decrypt the page so
	 * that KVM's writes and the CPU's reads get along.  Note, this is
	 * only necessary when using shadow paging, as 64-bit NPT can get at
	 * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
	 * by 32-bit kernels (when KVM itself uses 32-bit NPT).
	 */
	if (!tdp_enabled)
		set_memory_decrypted((unsigned long)mmu->pae_root, 1);
	else
		WARN_ON_ONCE(shadow_me_mask);

	for (i = 0; i < 4; ++i)
		mmu->pae_root[i] = INVALID_PAE_ROOT;

	return 0;
}

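/*
 * Per-vCPU MMU setup: wire up the per-vCPU object caches and allocate the
 * special root pages for both the root MMU and the guest (nested) MMU.
 */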
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;

	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
	if (ret)
		return ret;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
	if (ret)
		goto fail_allocate_root;

	return ret;
 fail_allocate_root:
	free_mmu_pages(&vcpu->arch.guest_mmu);
	return ret;
}

#define BATCH_ZAP_PAGES	10
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	int nr_zapped, batch = 0;

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		/*
		 * No obsolete valid page exists before a newly created page
		 * since active_mmu_pages is a FIFO list.
		 */
		if (!is_obsolete_sp(kvm, sp))
			break;

		/*
		 * Invalid pages should never land back on the list of active
		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
		 * infinite loop if the page gets put back on the list (again).
		 */
		if (WARN_ON(sp->role.invalid))
			continue;

		/*
		 * No need to flush the TLB since we're only zapping shadow
		 * pages with an obsolete generation number and all vCPUs have
		 * loaded a new root, i.e. the shadow pages being zapped cannot
		 * be in active use by the guest.
		 */
		if (batch >= BATCH_ZAP_PAGES &&
		    cond_resched_rwlock_write(&kvm->mmu_lock)) {
			batch = 0;
			goto restart;
		}

		if (__kvm_mmu_prepare_zap_page(kvm, sp,
				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
			batch += nr_zapped;
			goto restart;
		}
	}

	/*
	 * Trigger a remote TLB flush before freeing the page tables to ensure
	 * KVM is not in the middle of a lockless shadow page table walk, which
	 * may reference the pages.
	 */
	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
}

/*
 * Fast-invalidate all shadow pages, using a lock-break technique to zap the
 * obsolete pages in batches.
 *
 * This is required when a memslot is being deleted or the VM is being
 * destroyed; in those cases the caller must be able to assume that, once
 * this function returns, the KVM MMU no longer uses any resources of the
 * slot (or of any slot) being removed.
 */
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	write_lock(&kvm->mmu_lock);
	trace_kvm_mmu_zap_all_fast(kvm);

	/*
	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
	 * held for the entire duration of zapping obsolete pages, it's
	 * impossible for there to be multiple invalid generations associated
	 * with *valid* shadow pages at any given time, i.e. there is exactly
	 * one valid generation and (at most) one invalid generation.
	 */
	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;

	/*
	 * In order to ensure all threads see this change when handling the
	 * MMU reload signal, this must happen in the same critical section
	 * as kvm_reload_remote_mmus, and before kvm_zap_obsolete_pages as
	 * kvm_zap_obsolete_pages could drop the MMU lock and yield.
	 */
	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_invalidate_all_roots(kvm);

	/*
	 * Notify all vcpus to reload their shadow page tables and flush the
	 * TLB.  All vcpus will then switch to a new shadow page table with
	 * the new mmu_valid_gen.
	 *
	 * Note: we need to do this under the protection of mmu_lock,
	 * otherwise a vcpu could purge a shadow page but miss the tlb flush.
	 */
	kvm_reload_remote_mmus(kvm);

	kvm_zap_obsolete_pages(kvm);

	write_unlock(&kvm->mmu_lock);

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		kvm_tdp_mmu_zap_invalidated_roots(kvm);
		read_unlock(&kvm->mmu_lock);
	}
}

static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{
	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
{
	kvm_mmu_zap_all_fast(kvm);
}

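/*
 * Per-VM MMU setup: initialize the TDP MMU (or note that rmaps are required
 * when it is unavailable) and register the page-track notifier that feeds
 * guest PTE writes and slot flushes into the shadow MMU.
 */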
void kvm_mmu_init_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);

	if (!kvm_mmu_init_tdp_mmu(kvm))
		/*
		 * No smp_load/store wrappers needed here as we are in
		 * VM init and there cannot be any memslots / other threads
		 * accessing this struct kvm yet.
		 */
		kvm->arch.memslots_have_rmaps = true;

	node->track_write = kvm_mmu_pte_write;
	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
	kvm_page_track_register_notifier(kvm, node);
}

void kvm_mmu_uninit_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	kvm_page_track_unregister_notifier(kvm, node);

	kvm_mmu_uninit_tdp_mmu(kvm);
}

/*
 * Invalidate (zap) SPTEs that cover GFNs from gfn_start up to, but not
 * including, gfn_end.
 */
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;
	bool flush = false;

	write_lock(&kvm->mmu_lock);

	kvm_inc_notifier_count(kvm, gfn_start, gfn_end);

	if (kvm_memslots_have_rmaps(kvm)) {
		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
			slots = __kvm_memslots(kvm, i);
			kvm_for_each_memslot(memslot, slots) {
				gfn_t start, end;

				start = max(gfn_start, memslot->base_gfn);
				end = min(gfn_end, memslot->base_gfn + memslot->npages);
				if (start >= end)
					continue;

				flush = slot_handle_level_range(kvm,
						(const struct kvm_memory_slot *) memslot,
						kvm_zap_rmapp, PG_LEVEL_4K,
						KVM_MAX_HUGEPAGE_LEVEL, start,
						end - 1, true, flush);
			}
		}
		if (flush)
			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
							   gfn_end - gfn_start);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
			flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
							  gfn_end, flush);
		if (flush)
			kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
							   gfn_end - gfn_start);
	}

	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
						   gfn_end - gfn_start);

	kvm_dec_notifier_count(kvm, gfn_start, gfn_end);

	write_unlock(&kvm->mmu_lock);
}

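/*
 * Write-protect every SPTE that maps the memslot at 'start_level' and above,
 * in both the rmap-based shadow MMU (under mmu_lock held for write) and the
 * TDP MMU (under mmu_lock held for read), typically as part of enabling
 * dirty logging for the slot.
 */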
static bool slot_rmap_write_protect(struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head,
				    const struct kvm_memory_slot *slot)
{
	return __rmap_write_protect(kvm, rmap_head, false);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      const struct kvm_memory_slot *memslot,
				      int start_level)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
					  start_level, KVM_MAX_HUGEPAGE_LEVEL,
					  false);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
		read_unlock(&kvm->mmu_lock);
	}

	/*
	 * Flushing the TLBs outside of mmu_lock is safe here because SPTEs
	 * are only changed from writable to read-only, i.e. the only case
	 * that matters is a present->present transition.  (Changing an SPTE
	 * from present to non-present flushes the TLBs immediately.)  In
	 * other words, the only path of concern is mmu_spte_update(), which
	 * checks Host-writable | MMU-writable instead of PT_WRITABLE_MASK and
	 * therefore no longer depends on PT_WRITABLE_MASK.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}

static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head,
					 const struct kvm_memory_slot *slot)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_tlb_flush = 0;
	kvm_pfn_t pfn;
	struct kvm_mmu_page *sp;

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		sp = sptep_to_sp(sptep);
		pfn = spte_to_pfn(*sptep);

		/*
		 * Huge page mappings cannot be recreated for indirect shadow
		 * pages, which are found on the last rmap (level = 1) when not
		 * using tdp; such shadow pages are kept in sync with the guest
		 * page table, and the guest is using a 4K mapping if the
		 * indirect sp has level = 1.
		 */
		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
							       pfn, PG_LEVEL_NUM)) {
			pte_list_remove(kvm, rmap_head, sptep);

			if (kvm_available_flush_tlb_with_range())
				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
					KVM_PAGES_PER_HPAGE(sp->role.level));
			else
				need_tlb_flush = 1;

			goto restart;
		}
	}

	return need_tlb_flush;
}

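/*
 * Zap leaf SPTEs in the slot that could instead be mapped by a larger page,
 * so that subsequent faults can re-install huge mappings; this is typically
 * invoked after dirty logging has been disabled for the slot.
 */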
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *slot)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
		if (flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
		if (flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
		read_unlock(&kvm->mmu_lock);
	}
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	/*
	 * All current use cases for flushing the TLBs for a specific memslot
	 * are related to dirty logging, and many do the TLB flush out of
	 * mmu_lock.  The interaction between the various operations on a
	 * memslot must be serialized by slots_lock to ensure the TLB flush
	 * from one operation is observed by any other operation on the same
	 * memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
					   memslot->npages);
}

void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{
	bool flush = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
					 false);
		write_unlock(&kvm->mmu_lock);
	}

	if (is_tdp_mmu_enabled(kvm)) {
		read_lock(&kvm->mmu_lock);
		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
		read_unlock(&kvm->mmu_lock);
	}

	/*
	 * It's also safe to flush TLBs out of mmu lock here as currently this
	 * function is only used for dirty logging, in which case flushing TLB
	 * out of mmu lock also guarantees no dirty pages will be lost in
	 * dirty_bitmap.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int ign;

	write_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		if (WARN_ON(sp->role.invalid))
			continue;
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
			goto restart;
		if (cond_resched_rwlock_write(&kvm->mmu_lock))
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	if (is_tdp_mmu_enabled(kvm))
		kvm_tdp_mmu_zap_all(kvm);

	write_unlock(&kvm->mmu_lock);
}

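/*
 * React to a change of the memslot generation that is cached in MMIO SPTEs.
 * Illustration (not normative): with KVM_ADDRESS_SPACE_NUM == 2, the low bit
 * of the generation acts as the address-space modifier; it is masked off
 * below so that a generation wrap in either address space shows up as
 * gen == 0 and forces a zap of all shadow pages.
 */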
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{
	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);

	gen &= MMIO_SPTE_GEN_MASK;

	/*
	 * Generation numbers are incremented in multiples of the number of
	 * address spaces in order to provide unique generations across all
	 * address spaces.  Strip what is effectively the address space
	 * modifier prior to checking for a wrap of the MMIO generation so
	 * that a wrap in any address space is detected.
	 */
	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);

	/*
	 * The very rare case: if the MMIO generation number has wrapped,
	 * zap all shadow pages.
	 */
	if (unlikely(gen == 0)) {
		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
		kvm_mmu_zap_all_fast(kvm);
	}
}

static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	int nr_to_scan = sc->nr_to_scan;
	unsigned long freed = 0;

	mutex_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		/*
		 * Never scan more than sc->nr_to_scan VM instances.
		 * Will not hit this condition practically since we do not try
		 * to shrink more than one VM and it is very unlikely to see
		 * !n_used_mmu_pages so many times.
		 */
		if (!nr_to_scan--)
			break;
		/*
		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
		 * here. We may skip a VM instance erroneously, but we do not
		 * want to shrink a VM that only started to populate its MMU
		 * anyway.
		 */
		if (!kvm->arch.n_used_mmu_pages &&
		    !kvm_has_zapped_obsolete_pages(kvm))
			continue;

		idx = srcu_read_lock(&kvm->srcu);
		write_lock(&kvm->mmu_lock);

		if (kvm_has_zapped_obsolete_pages(kvm)) {
			kvm_mmu_commit_zap_page(kvm,
			      &kvm->arch.zapped_obsolete_pages);
			goto unlock;
		}

		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);

unlock:
		write_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		/*
		 * unfair on small ones
		 * per-vm shrinkers cry out
		 * sadness comes quickly
		 */
		list_move_tail(&kvm->vm_list, &vm_list);
		break;
	}

	mutex_unlock(&kvm_lock);
	return freed;
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
	.count_objects = mmu_shrink_count,
	.scan_objects = mmu_shrink_scan,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
	kmem_cache_destroy(pte_list_desc_cache);
	kmem_cache_destroy(mmu_page_header_cache);
}

static bool get_nx_auto_mode(void)
{
	/* Return true when CPU has the bug, and mitigations are ON */
	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
}

static void __set_nx_huge_pages(bool val)
{
	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
}

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
{
	bool old_val = nx_huge_pages;
	bool new_val;

	/* In "auto" mode deploy workaround only if CPU has the bug. */
	if (sysfs_streq(val, "off"))
		new_val = 0;
	else if (sysfs_streq(val, "force"))
		new_val = 1;
	else if (sysfs_streq(val, "auto"))
		new_val = get_nx_auto_mode();
	else if (strtobool(val, &new_val) < 0)
		return -EINVAL;

	__set_nx_huge_pages(new_val);

	if (new_val != old_val) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list) {
			mutex_lock(&kvm->slots_lock);
			kvm_mmu_zap_all_fast(kvm);
			mutex_unlock(&kvm->slots_lock);

			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
		}
		mutex_unlock(&kvm_lock);
	}

	return 0;
}

int kvm_mmu_module_init(void)
{
	int ret = -ENOMEM;

	if (nx_huge_pages == -1)
		__set_nx_huge_pages(get_nx_auto_mode());

	/*
	 * MMU roles use union aliasing which is, generally speaking, an
	 * undefined behavior. However, we supposedly know how compilers behave
	 * and the current status quo is unlikely to change. Guardians below are
	 * supposed to let us know if the assumption becomes false.
	 */
	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));

	kvm_mmu_reset_all_pte_masks();

	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
					    sizeof(struct pte_list_desc),
					    0, SLAB_ACCOUNT, NULL);
	if (!pte_list_desc_cache)
		goto out;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, SLAB_ACCOUNT, NULL);
	if (!mmu_page_header_cache)
		goto out;

	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
		goto out;

	ret = register_shrinker(&mmu_shrinker);
	if (ret)
		goto out;

	return 0;

out:
	mmu_destroy_caches();
	return ret;
}

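/*
 * Worked example (illustrative values: KVM_PERMILLE_MMU_PAGES is 20 and
 * KVM_MIN_ALLOC_MMU_PAGES is 64 in current kernels): a guest backed by 4 GiB
 * of memslots spans 1,048,576 4KiB pages, so the default cap comes out to
 * 1048576 * 20 / 1000 = 20971 shadow pages, well above the 64-page floor.
 */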
/*
 * Calculate the number of mmu pages needed for kvm.
 */
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
{
	unsigned long nr_mmu_pages;
	unsigned long nr_pages = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);

		kvm_for_each_memslot(memslot, slots)
			nr_pages += memslot->npages;
	}

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	free_mmu_pages(&vcpu->arch.root_mmu);
	free_mmu_pages(&vcpu->arch.guest_mmu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
	mmu_audit_disable();
}

static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
{
	unsigned int old_val;
	int err;

	old_val = nx_huge_pages_recovery_ratio;
	err = param_set_uint(val, kp);
	if (err)
		return err;

	if (READ_ONCE(nx_huge_pages) &&
	    !old_val && nx_huge_pages_recovery_ratio) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list)
			wake_up_process(kvm->arch.nx_lpage_recovery_thread);

		mutex_unlock(&kvm_lock);
	}

	return err;
}

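/*
 * Periodic NX huge page recovery: zap roughly 1/nx_huge_pages_recovery_ratio
 * of the shadow pages that were forced down to 4KiB mappings by the NX huge
 * page mitigation, so that their huge mappings can be rebuilt.  Invoked from
 * the per-VM "kvm-nx-lpage-recovery" worker (see
 * kvm_nx_lpage_recovery_worker() below), which wakes up every 60 seconds
 * while the mitigation is active.
 */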
static void kvm_recover_nx_lpages(struct kvm *kvm)
{
	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
	int rcu_idx;
	struct kvm_mmu_page *sp;
	unsigned int ratio;
	LIST_HEAD(invalid_list);
	bool flush = false;
	ulong to_zap;

	rcu_idx = srcu_read_lock(&kvm->srcu);
	write_lock(&kvm->mmu_lock);

	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
	for ( ; to_zap; --to_zap) {
		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
			break;

		/*
		 * We use a separate list instead of just using active_mmu_pages
		 * because the number of lpage_disallowed pages is expected to
		 * be relatively small compared to the total.
		 */
		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
				      struct kvm_mmu_page,
				      lpage_disallowed_link);
		WARN_ON_ONCE(!sp->lpage_disallowed);
		if (is_tdp_mmu_page(sp)) {
			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
		} else {
			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
			WARN_ON_ONCE(sp->lpage_disallowed);
		}

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
			cond_resched_rwlock_write(&kvm->mmu_lock);
			flush = false;
		}
	}
	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);

	write_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, rcu_idx);
}

static long get_nx_lpage_recovery_timeout(u64 start_time)
{
	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
		? start_time + 60 * HZ - get_jiffies_64()
		: MAX_SCHEDULE_TIMEOUT;
}

static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
{
	u64 start_time;
	long remaining_time;

	while (true) {
		start_time = get_jiffies_64();
		remaining_time = get_nx_lpage_recovery_timeout(start_time);

		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop() && remaining_time > 0) {
			schedule_timeout(remaining_time);
			remaining_time = get_nx_lpage_recovery_timeout(start_time);
			set_current_state(TASK_INTERRUPTIBLE);
		}

		set_current_state(TASK_RUNNING);

		if (kthread_should_stop())
			return 0;

		kvm_recover_nx_lpages(kvm);
	}
}

int kvm_mmu_post_init_vm(struct kvm *kvm)
{
	int err;

	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
					  "kvm-nx-lpage-recovery",
					  &kvm->arch.nx_lpage_recovery_thread);
	if (!err)
		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);

	return err;
}

void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.nx_lpage_recovery_thread)
		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
}