// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <linux/kthread.h>

#include <asm/page.h>
#include <asm/memtype.h>
#include <asm/cmpxchg.h>
#include <asm/e820/api.h>
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
#include "trace.h"

extern bool itlb_multihit_kvm_mitigation;

static int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT.  */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);

static struct kernel_param_ops nx_huge_pages_ops = {
	.set = set_nx_huge_pages,
	.get = param_get_bool,
};

static struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
	.set = set_nx_huge_pages_recovery_ratio,
	.get = param_get_uint,
};

module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
		&nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");

/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
bool tdp_enabled = false;

enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	AUDIT_PRE_PTE_WRITE,
	AUDIT_POST_PTE_WRITE,
	AUDIT_PRE_SYNC,
	AUDIT_POST_SYNC
};

#undef MMU_DEBUG

#ifdef MMU_DEBUG
static bool dbg = 0;
module_param(dbg, bool, 0644);

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

#define PTE_PREFETCH_NUM		8

#define PT_FIRST_AVAIL_BITS_SHIFT 10
#define PT64_SECOND_AVAIL_BITS_SHIFT 54

/*
 * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
 * Access Tracking SPTEs.
 */
#define SPTE_SPECIAL_MASK (3ULL << 52)
#define SPTE_AD_ENABLED_MASK (0ULL << 52)
#define SPTE_AD_DISABLED_MASK (1ULL << 52)
#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
#define SPTE_MMIO_MASK (3ULL << 52)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* The mask for the R/X bits in EPT PTEs */
#define PT64_EPT_READABLE_MASK			0x1ull
#define PT64_EPT_EXECUTABLE_MASK		0x4ull

#include <trace/events/kvm.h>

#define SPTE_HOST_WRITEABLE	(1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE	(1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3

/*
 * Return values of handle_mmio_page_fault and mmu.page_fault:
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 *
 * For handle_mmio_page_fault only:
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE = 1,
	RET_PF_INVALID = 2,
};

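/*
 * One node in a pte_list/rmap chain: holds up to PTE_LIST_EXT SPTE pointers
 * plus a link to the next descriptor when the chain grows longer.
 */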
struct pte_list_desc {
	u64 *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

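/*
 * State for walking the shadow page tables from the root down to the leaf
 * entry that maps a given address (see the for_each_shadow_entry*() macros
 * below).
 */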
struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	u64 *sptep;
	int level;
	unsigned index;
};

#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
	for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
					 (_root), (_addr));                \
	     shadow_walk_okay(&(_walker));			           \
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\
	for (shadow_walk_init(&(_walker), _vcpu, _addr);		\
	     shadow_walk_okay(&(_walker)) &&				\
		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
	     __shadow_walk_next(&(_walker), spte))

static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;
static u64 __read_mostly shadow_mmio_value;
static u64 __read_mostly shadow_mmio_access_mask;
static u64 __read_mostly shadow_present_mask;
static u64 __read_mostly shadow_me_mask;

/*
 * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
 * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
 * pages.
 */
static u64 __read_mostly shadow_acc_track_mask;

/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes. We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will be
 * restored only when a write is attempted to the page.
 */
static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
						    PT64_EPT_EXECUTABLE_MASK;
static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;

/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;

/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts.  This mask covers the lower bits of the GFN.
 */
static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

/*
 * The number of non-reserved physical address bits irrespective of features
 * that repurpose legal bits, e.g. MKTME.
 */
static u8 __read_mostly shadow_phys_bits;

static void mmu_spte_set(u64 *sptep, u64 spte);
static bool is_executable_pte(u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

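/*
 * Remote TLB flush helpers: use a ranged flush when the backend provides
 * tlb_remote_flush_with_range, and fall back to a full
 * kvm_flush_remote_tlbs() otherwise.
 */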

static inline bool kvm_available_flush_tlb_with_range(void)
{
	return kvm_x86_ops->tlb_remote_flush_with_range;
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	int ret = -ENOTSUPP;

	if (range && kvm_x86_ops->tlb_remote_flush_with_range)
		ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);

	if (ret)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
		u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;

	range.start_gfn = start_gfn;
	range.pages = pages;

	kvm_flush_remote_tlbs_with_range(kvm, &range);
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	BUG_ON((mmio_mask & mmio_value) != mmio_value);
	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
	shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

static bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value;
}

static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
	return sp->role.ad_disabled;
}

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the log
	 * would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages.  This also bypasses
	 * PML, since writes now result in a vmexit.
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
}

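/*
 * Decode the SPTE_SPECIAL_MASK bits (see the SPTE_AD_*_MASK definitions
 * above): A/D bits are usable unless the SPTE is marked
 * SPTE_AD_DISABLED_MASK, and dirty logging must fall back to write
 * protection unless it is marked SPTE_AD_ENABLED_MASK.
 */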
static inline bool spte_ad_enabled(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
}

static inline bool spte_ad_need_write_protect(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
}

static bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}

static inline u64 spte_shadow_accessed_mask(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}

static inline u64 spte_shadow_dirty_mask(u64 spte)
{
	MMU_WARN_ON(is_mmio_spte(spte));
	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}

static inline bool is_access_track_spte(u64 spte)
{
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}

/*
 * Due to limited space in PTEs, the MMIO generation is an 18 bit subset of
 * the memslots generation and is derived as follows:
 *
 * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
 * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62
 *
 * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
 * the MMIO generation number, as doing so would require stealing a bit from
 * the "real" generation number and thus effectively halve the maximum number
 * of MMIO generations that can be handled before encountering a wrap (which
 * requires a full MMU zap).  The flag is instead explicitly queried when
 * checking for MMIO spte cache hits.
 */
#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(17, 0)

#define MMIO_SPTE_GEN_LOW_START		3
#define MMIO_SPTE_GEN_LOW_END		11
#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
						    MMIO_SPTE_GEN_LOW_START)

#define MMIO_SPTE_GEN_HIGH_START	PT64_SECOND_AVAIL_BITS_SHIFT
#define MMIO_SPTE_GEN_HIGH_END		62
#define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
						    MMIO_SPTE_GEN_HIGH_START)

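/* Pack an MMIO generation into the low/high spare SPTE bit ranges above. */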
static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}

static u64 get_mmio_spte_generation(u64 spte)
{
	u64 gen;

	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
	return gen;
}

static u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{

	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 mask = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	access &= shadow_mmio_access_mask;
	mask |= shadow_mmio_value | access;
	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< shadow_nonpresent_or_rsvd_mask_len;

	return mask;
}

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
			   unsigned int access)
{
	u64 mask = make_mmio_spte(vcpu, gfn, access);
	unsigned int gen = get_mmio_spte_generation(mask);

	access = mask & ACC_ALL;

	trace_mark_mmio_spte(sptep, gfn, access, gen);
	mmu_spte_set(sptep, mask);
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
	       & shadow_nonpresent_or_rsvd_mask;

	return gpa >> PAGE_SHIFT;
}

static unsigned get_mmio_spte_access(u64 spte)
{
	return spte & shadow_mmio_access_mask;
}

static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			  kvm_pfn_t pfn, unsigned int access)
{
	if (unlikely(is_noslot_pfn(pfn))) {
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}

static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
	u64 kvm_gen, spte_gen, gen;

	gen = kvm_vcpu_memslots(vcpu)->generation;
	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return false;

	kvm_gen = gen & MMIO_SPTE_GEN_MASK;
	spte_gen = get_mmio_spte_generation(spte);

	trace_check_mmio_spte(spte, kvm_gen, spte_gen);
	return likely(kvm_gen == spte_gen);
}

/*
 * Sets the shadow PTE masks used by the MMU.
 *
 * Assumptions:
 *  - Setting either @accessed_mask or @dirty_mask requires setting both
 *  - At least one of @accessed_mask or @acc_track_mask must be set
 */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask)
{
	BUG_ON(!dirty_mask != !accessed_mask);
	BUG_ON(!accessed_mask && !acc_track_mask);
	BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);

	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
	shadow_present_mask = p_mask;
	shadow_acc_track_mask = acc_track_mask;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static u8 kvm_get_shadow_phys_bits(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
	 * in CPU detection code, but the processor treats those reduced bits as
	 * 'keyID' thus they are not reserved bits. Therefore KVM needs to look at
	 * the physical address bits reported by CPUID.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
	 * custom CPUID.  Proceed with whatever the kernel found since these features
	 * aren't virtualizable (SME/SEV also require CPUIDs higher than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

static void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;

	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;
	shadow_nx_mask = 0;
	shadow_x_mask = 0;
	shadow_mmio_mask = 0;
	shadow_present_mask = 0;
	shadow_acc_track_mask = 0;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or less physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID. Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_cache_bits;
	if (boot_cpu_data.x86_cache_bits <
	    52 - shadow_nonpresent_or_rsvd_mask_len) {
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(boot_cpu_data.x86_cache_bits -
				  shadow_nonpresent_or_rsvd_mask_len,
				  boot_cpu_data.x86_cache_bits - 1);
		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
	} else
		WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
	return (pte != 0) && !is_mmio_spte(pte);
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static bool is_executable_pte(u64 spte)
{
	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}

static kvm_pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
	return READ_ONCE(*sptep);
}
#else
union split_spte {
	struct {
		u32 spte_low;
		u32 spte_high;
	};
	u64 spte;
};

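/*
 * Count present->nonpresent transitions so that lockless readers can detect
 * a torn 64-bit SPTE read on 32-bit hosts (see __get_spte_lockless()).
 */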
static void count_spte_clear(u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp =  page_header(__pa(sptep));

	if (is_shadow_present_pte(spte))
		return;

	/* Ensure the spte is completely set before we increase the count */
	smp_wmb();
	sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	ssptep->spte_high = sspte.spte_high;

	/*
	 * If we map the spte from nonpresent to present, we should store
	 * the high bits first, then set the present bit, so the CPU cannot
	 * fetch this spte while we are still setting it.
	 */
	smp_wmb();

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

	/*
	 * If we map the spte from present to nonpresent, we should clear
	 * the present bit first so the vCPU cannot fetch the old high bits.
	 */
	smp_wmb();

	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
	union split_spte *ssptep, sspte, orig;

	ssptep = (union split_spte *)sptep;
	sspte = (union split_spte)spte;

	/* xchg acts as a barrier before the setting of the high bits */
	orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
	orig.spte_high = ssptep->spte_high;
	ssptep->spte_high = sspte.spte_high;
	count_spte_clear(sptep, spte);

	return orig.spte;
}

/*
 * The idea of using a lightweight way to get the spte on an x86_32 guest is from
 * gup_get_pte (mm/gup.c).
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running outside of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
	struct kvm_mmu_page *sp =  page_header(__pa(sptep));
	union split_spte spte, *orig = (union split_spte *)sptep;
	int count;

retry:
	count = sp->clear_spte_count;
	smp_rmb();

	spte.spte_low = orig->spte_low;
	smp_rmb();

	spte.spte_high = orig->spte_high;
	smp_rmb();

	if (unlikely(spte.spte_low != orig->spte_low ||
	      count != sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
#endif

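/*
 * A write-protected SPTE can be made writable without holding mmu_lock only
 * if both the host-writable and MMU-writable bits are set (fast page fault).
 */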
static bool spte_can_locklessly_be_made_writable(u64 spte)
{
	return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
		(SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
}

static bool spte_has_volatile_bits(u64 spte)
{
	if (!is_shadow_present_pte(spte))
		return false;

	/*
	 * Always atomically update spte if it can be updated
	 * out of mmu-lock: this ensures the dirty bit is not lost, and it
	 * also helps us to get a stable is_writable_pte() so that a needed
	 * TLB flush is not missed.
	 */
	if (spte_can_locklessly_be_made_writable(spte) ||
	    is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if ((spte & shadow_accessed_mask) == 0 ||
	    	    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
			return true;
	}

	return false;
}

static bool is_accessed_spte(u64 spte)
{
	u64 accessed_mask = spte_shadow_accessed_mask(spte);

	return accessed_mask ? spte & accessed_mask
			     : !is_access_track_spte(spte);
}

static bool is_dirty_spte(u64 spte)
{
	u64 dirty_mask = spte_shadow_dirty_mask(spte);

	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
	WARN_ON(is_shadow_present_pte(*sptep));
	__set_spte(sptep, new_spte);
}

/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
 */
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
	u64 old_spte = *sptep;

	WARN_ON(!is_shadow_present_pte(new_spte));

	if (!is_shadow_present_pte(old_spte)) {
		mmu_spte_set(sptep, new_spte);
		return old_spte;
	}

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, new_spte);
	else
		old_spte = __update_clear_spte_slow(sptep, new_spte);

	WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

	return old_spte;
}

/* Rules for using mmu_spte_update:
 * Update the state bits, it means the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might be cached on a CPU's TLB, the return value indicates this
 * case.
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
	bool flush = false;
	u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

	if (!is_shadow_present_pte(old_spte))
		return false;

	/*
	 * Updating the spte out of mmu-lock is safe, since
	 * we always atomically update it, see the comments in
	 * spte_has_volatile_bits().
	 */
	if (spte_can_locklessly_be_made_writable(old_spte) &&
	      !is_writable_pte(new_spte))
		flush = true;

	/*
	 * Flush TLB when accessed/dirty states are changed in the page tables,
	 * to guarantee consistency between TLB and page tables.
	 */

	if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
	}

	if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
		flush = true;
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
	}

	return flush;
}

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent, and track the
 * state bits, it is used to clear the last level sptep.
 * Returns non-zero if the PTE was previously valid.
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
{
	kvm_pfn_t pfn;
	u64 old_spte = *sptep;

	if (!spte_has_volatile_bits(old_spte))
		__update_clear_spte_fast(sptep, 0ull);
	else
		old_spte = __update_clear_spte_slow(sptep, 0ull);

	if (!is_shadow_present_pte(old_spte))
		return 0;

	pfn = spte_to_pfn(old_spte);
930 931 932 933 934 935

	/*
	 * KVM does not hold the refcount of the page used by
	 * kvm mmu, before reclaiming the page, we should
	 * unmap it from mmu first.
	 */
	WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));

	if (is_accessed_spte(old_spte))
		kvm_set_pfn_accessed(pfn);

	if (is_dirty_spte(old_spte))
		kvm_set_pfn_dirty(pfn);

	return 1;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear spte without caring the state bits of sptep,
 * it is used to set the upper level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
	__update_clear_spte_fast(sptep, 0ull);
}

static u64 mmu_spte_get_lockless(u64 *sptep)
{
	return __get_spte_lockless(sptep);
}

static u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	/*
	 * Making an Access Tracking PTE will result in removal of write access
	 * from the PTE. So, verify that we will be able to restore the write
	 * access in the fast page fault path later on.
	 */
	WARN_ONCE((spte & PT_WRITABLE_MASK) &&
		  !spte_can_locklessly_be_made_writable(spte),
		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

	WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask <<
			  shadow_acc_track_saved_bits_shift),
		  "kvm: Access Tracking saved bit locations are not zero\n");

	spte |= (spte & shadow_acc_track_saved_bits_mask) <<
		shadow_acc_track_saved_bits_shift;
	spte &= ~shadow_acc_track_mask;

	return spte;
}

/* Restore an acc-track PTE back to a regular PTE */
static u64 restore_acc_track_spte(u64 spte)
{
	u64 new_spte = spte;
	u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift)
			 & shadow_acc_track_saved_bits_mask;

	WARN_ON_ONCE(spte_ad_enabled(spte));
	WARN_ON_ONCE(!is_access_track_spte(spte));

	new_spte &= ~shadow_acc_track_mask;
	new_spte &= ~(shadow_acc_track_saved_bits_mask <<
		      shadow_acc_track_saved_bits_shift);
	new_spte |= saved_bits;

	return new_spte;
}

/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
	u64 spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(spte))
		return false;

	if (spte_ad_enabled(spte)) {
		clear_bit((ffs(shadow_accessed_mask) - 1),
			  (unsigned long *)sptep);
	} else {
		/*
		 * Capture the dirty status of the page, so that it doesn't get
		 * lost when the SPTE is marked for access tracking.
		 */
		if (is_writable_pte(spte))
			kvm_set_pfn_dirty(spte_to_pfn(spte));

		spte = mark_spte_for_access_track(spte);
		mmu_spte_update_no_track(sptep, spte);
	}

	return true;
}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
	/*
	 * Prevent page table teardown by making any free-er wait during
	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
	 */
	local_irq_disable();

	/*
	 * Make sure a following spte read is not reordered ahead of the write
	 * to vcpu->mode.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of
	 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
	local_irq_enable();
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL_ACCOUNT);
		if (!obj)
			return cache->nobjs >= min ? 0 : -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
{
	return cache->nobjs;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
				  struct kmem_cache *cache)
{
	while (mc->nobjs)
		kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	void *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
		if (!page)
			return cache->nobjs >= min ? 0 : -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

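/*
 * Pre-fill the per-vCPU object caches so that a page fault can allocate
 * rmap descriptors, shadow pages and page headers without sleeping while
 * holding mmu_lock.
 */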
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				   pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				pte_list_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
				mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
	kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}

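/*
 * For direct shadow pages the gfn of each entry is computed from sp->gfn and
 * the entry's index; indirect shadow pages record the gfns in sp->gfns[].
 */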
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
	if (!sp->role.direct)
		return sp->gfns[index];

	return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
	if (!sp->role.direct) {
		sp->gfns[index] = gfn;
		return;
	}

	if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
		pr_err_ratelimited("gfn mismatch under direct page %llx "
				   "(expected %llx, got %llx)\n",
				   sp->gfn,
				   kvm_mmu_page_get_gfn(sp, index), gfn);
}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
					      struct kvm_memory_slot *slot,
					      int level)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.lpage_info[level - 2][idx];
}

static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
					    gfn_t gfn, int count)
{
	struct kvm_lpage_info *linfo;
	int i;

	for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		linfo = lpage_info_slot(gfn, slot, i);
		linfo->disallow_lpage += count;
		WARN_ON(linfo->disallow_lpage < 0);
	}
}

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, 1);
}

void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
	update_gfn_disallow_lpage_count(slot, gfn, -1);
}

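/*
 * Account a new indirect shadow page: non-leaf pages have their gfn
 * write-protected via page tracking, leaf pages disallow huge mappings
 * of their gfn.
 */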
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages++;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);

	/* the non-leaf shadow pages are kept read-only. */
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return kvm_slot_page_track_add_page(kvm, slot, gfn,
						    KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_disallow_lpage(slot, gfn);
}

static void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	if (sp->lpage_disallowed)
		return;

	++kvm->stat.nx_lpage_splits;
	list_add_tail(&sp->lpage_disallowed_link,
		      &kvm->arch.lpage_disallowed_mmu_pages);
	sp->lpage_disallowed = true;
}

static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	kvm->arch.indirect_shadow_pages--;
	gfn = sp->gfn;
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
						       KVM_PAGE_TRACK_WRITE);

	kvm_mmu_gfn_allow_lpage(slot, gfn);
}

static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	--kvm->stat.nx_lpage_splits;
	sp->lpage_disallowed = false;
	list_del(&sp->lpage_disallowed_link);
}

static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return NULL;
	if (no_dirty_log && slot->dirty_bitmap)
		return NULL;

	return slot;
}

/*
 * About rmap_head encoding:
 *
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
 * pte_list_desc containing more mappings.
 */

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
 */
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
			struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	int i, count = 0;

	if (!rmap_head->val) {
		rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_pte_list_desc(vcpu);
		desc->sptes[0] = (u64 *)rmap_head->val;
		desc->sptes[1] = spte;
		rmap_head->val = (unsigned long)desc | 1;
		++count;
	} else {
		rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
			desc = desc->more;
			count += PTE_LIST_EXT;
		}
		if (desc->sptes[PTE_LIST_EXT-1]) {
			desc->more = mmu_alloc_pte_list_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; ++i)
			++count;
		desc->sptes[i] = spte;
	}
	return count;
}

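/*
 * Remove desc->sptes[i] by moving the descriptor's last entry into its slot,
 * freeing the descriptor (and fixing up the chain) once it becomes empty.
 */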
static void
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
			   struct pte_list_desc *desc, int i,
			   struct pte_list_desc *prev_desc)
{
	int j;

	for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
		;
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		rmap_head->val = 0;
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			rmap_head->val = (unsigned long)desc->more | 1;
	mmu_free_pte_list_desc(desc);
}

static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
{
	struct pte_list_desc *desc;
	struct pte_list_desc *prev_desc;
	int i;

	if (!rmap_head->val) {
		pr_err("%s: %p 0->BUG\n", __func__, spte);
		BUG();
	} else if (!(rmap_head->val & 1)) {
		rmap_printk("%s:  %p 1->0\n", __func__, spte);
		if ((u64 *)rmap_head->val != spte) {
			pr_err("%s:  %p 1->BUG\n", __func__, spte);
			BUG();
		}
		rmap_head->val = 0;
	} else {
		rmap_printk("%s:  %p many->many\n", __func__, spte);
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
				if (desc->sptes[i] == spte) {
					pte_list_desc_remove_entry(rmap_head,
							desc, i, prev_desc);
					return;
				}
			}
			prev_desc = desc;
			desc = desc->more;
		}
		pr_err("%s: %p many->many\n", __func__, spte);
		BUG();
	}
}

static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
{
	mmu_spte_clear_track_bits(sptep);
	__pte_list_remove(sptep, rmap_head);
}

static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
					   struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = gfn_to_index(gfn, slot->base_gfn, level);
	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
}

static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
					 struct kvm_mmu_page *sp)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	return __gfn_to_rmap(gfn, sp->role.level, slot);
}

static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_memory_cache *cache;

	cache = &vcpu->arch.mmu_pte_list_desc_cache;
	return mmu_memory_cache_free_objects(cache);
}

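/*
 * rmap_add()/rmap_remove() link and unlink a shadow PTE in the reverse map
 * of the gfn it maps.
 */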
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_head *rmap_head;

	sp = page_header(__pa(spte));
	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
	return pte_list_add(vcpu, spte, rmap_head);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	struct kvm_rmap_head *rmap_head;

	sp = page_header(__pa(spte));
	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
	rmap_head = gfn_to_rmap(kvm, gfn, sp);
	__pte_list_remove(spte, rmap_head);
}

/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap.  All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
	/* private fields */
	struct pte_list_desc *desc;	/* holds the sptep if not NULL */
	int pos;			/* index of the sptep */
};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
 * information in the iterator may not be valid.
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
			   struct rmap_iterator *iter)
{
	u64 *sptep;

	if (!rmap_head->val)
		return NULL;

	if (!(rmap_head->val & 1)) {
		iter->desc = NULL;
		sptep = (u64 *)rmap_head->val;
		goto out;
	}

	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	iter->pos = 0;
	sptep = iter->desc->sptes[iter->pos];
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
	u64 *sptep;

	if (iter->desc) {
		if (iter->pos < PTE_LIST_EXT - 1) {
			++iter->pos;
			sptep = iter->desc->sptes[iter->pos];
			if (sptep)
				goto out;
		}

		iter->desc = iter->desc->more;

		if (iter->desc) {
			iter->pos = 0;
			/* desc->sptes[0] cannot be NULL */
			sptep = iter->desc->sptes[iter->pos];
			goto out;
		}
	}

	return NULL;
out:
	BUG_ON(!is_shadow_present_pte(*sptep));
	return sptep;
}

#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
	     _spte_; _spte_ = rmap_get_next(_iter_))

static void drop_spte(struct kvm *kvm, u64 *sptep)
{
	if (mmu_spte_clear_track_bits(sptep))
		rmap_remove(kvm, sptep);
}


static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
	if (is_large_pte(*sptep)) {
		WARN_ON(page_header(__pa(sptep))->role.level ==
			PT_PAGE_TABLE_LEVEL);
		drop_spte(kvm, sptep);
		--kvm->stat.lpages;
		return true;
	}

	return false;
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
	if (__drop_large_spte(vcpu->kvm, sptep)) {
		struct kvm_mmu_page *sp = page_header(__pa(sptep));

		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
	}
}

/*
 * Write-protect on the specified @sptep, @pt_protect indicates whether
 * spte write-protection is caused by protecting shadow page table.
 *
 * Note: write protection differs between dirty logging and spte
 * protection:
 * - for dirty logging, the spte can be set to writable at anytime if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be writable only after unsync-ing
 *   shadow page.
 *
 * Return true if tlb need be flushed.
 */
static bool spte_write_protect(u64 *sptep, bool pt_protect)
{
	u64 spte = *sptep;

	if (!is_writable_pte(spte) &&
	      !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
		return false;

	rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);

	if (pt_protect)
		spte &= ~SPTE_MMU_WRITEABLE;
	spte = spte & ~PT_WRITABLE_MASK;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_write_protect(struct kvm *kvm,
				 struct kvm_rmap_head *rmap_head,
				 bool pt_protect)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		flush |= spte_write_protect(sptep, pt_protect);

	return flush;
}

static bool spte_clear_dirty(u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);

	MMU_WARN_ON(!spte_ad_enabled(spte));
	spte &= ~shadow_dirty_mask;
	return mmu_spte_update(sptep, spte);
}

static bool spte_wrprot_for_clear_dirty(u64 *sptep)
{
	bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
					       (unsigned long *)sptep);
	if (was_writable && !spte_ad_enabled(*sptep))
		kvm_set_pfn_dirty(spte_to_pfn(*sptep));

	return was_writable;
}

/*
 * Gets the GFN ready for another round of dirty logging by clearing the
 *	- D bit on ad-enabled SPTEs, and
 *	- W bit on ad-disabled SPTEs.
 * Returns true iff any D or W bits were cleared.
 */
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (spte_ad_need_write_protect(*sptep))
			flush |= spte_wrprot_for_clear_dirty(sptep);
		else
			flush |= spte_clear_dirty(sptep);

	return flush;
}

static bool spte_set_dirty(u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);

	/*
	 * Similar to the !kvm_x86_ops->slot_disable_log_dirty case,
	 * do not bother adding back write access to pages marked
	 * SPTE_AD_WRPROT_ONLY_MASK.
	 */
	spte |= shadow_dirty_mask;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (spte_ad_enabled(*sptep))
			flush |= spte_set_dirty(sptep);

	return flush;
}

/**
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	while (mask) {
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PT_PAGE_TABLE_LEVEL, slot);
		__rmap_write_protect(kvm, rmap_head, false);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}

/**
 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
 * protect the page if the D-bit isn't supported.
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should clear D-bit
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	while (mask) {
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PT_PAGE_TABLE_LEVEL, slot);
		__rmap_clear_dirty(kvm, rmap_head);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
	if (kvm_x86_ops->enable_log_dirty_pt_masked)
		kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
				mask);
	else
		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

/**
 * kvm_arch_write_log_dirty - emulate dirty page logging
 * @vcpu: Guest mode vcpu
 *
 * Emulate arch specific page modification logging for the
 * nested hypervisor
 */
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->write_log_dirty)
		return kvm_x86_ops->write_log_dirty(vcpu);

	return 0;
}

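/*
 * Write-protect all SPTEs that map @gfn in @slot, at every mapping level;
 * returns true if any SPTE needed to be write-protected.
 */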
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn)
{
	struct kvm_rmap_head *rmap_head;
	int i;
	bool write_protected = false;

	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
		rmap_head = __gfn_to_rmap(gfn, i, slot);
		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
	}

	return write_protected;
}

static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
}

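/*
 * Drop every SPTE in the rmap chain; returns true if a TLB flush is needed.
 */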
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	while ((sptep = rmap_get_first(rmap_head, &iter))) {
		rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);

		pte_list_remove(rmap_head, sptep);
1787
		flush = true;
1788
	}
1789

1790 1791 1792
	return flush;
}

static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
			   unsigned long data)
{
	return kvm_zap_rmapp(kvm, rmap_head);
}
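
/*
 * Reached via kvm_set_spte_hva() from the mmu_notifier change_pte path when
 * the host PTE backing a guest page is replaced (e.g. by COW or KSM).  If
 * the new host PTE is writable, the old sptes are simply dropped and rebuilt
 * on the next fault; otherwise they are rewritten in place to point at the
 * new pfn with write access removed.
 */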

static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
			     unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_flush = 0;
	u64 new_spte;
	pte_t *ptep = (pte_t *)data;
	kvm_pfn_t new_pfn;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
			    sptep, *sptep, gfn, level);

		need_flush = 1;

		if (pte_write(*ptep)) {
			pte_list_remove(rmap_head, sptep);
			goto restart;
		} else {
			new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
			new_spte |= (u64)new_pfn << PAGE_SHIFT;

			new_spte &= ~PT_WRITABLE_MASK;
			new_spte &= ~SPTE_HOST_WRITEABLE;

			new_spte = mark_spte_for_access_track(new_spte);

			mmu_spte_clear_track_bits(sptep);
			mmu_spte_set(sptep, new_spte);
		}
	}

	if (need_flush && kvm_available_flush_tlb_with_range()) {
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
		return 0;
	}

	return need_flush;
}

struct slot_rmap_walk_iterator {
	/* input fields. */
	struct kvm_memory_slot *slot;
	gfn_t start_gfn;
	gfn_t end_gfn;
	int start_level;
	int end_level;

	/* output fields. */
	gfn_t gfn;
	struct kvm_rmap_head *rmap;
	int level;

	/* private field. */
	struct kvm_rmap_head *end_rmap;
};

static void
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{
	iterator->level = level;
	iterator->gfn = iterator->start_gfn;
	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
					   iterator->slot);
}

static void
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
		    struct kvm_memory_slot *slot, int start_level,
		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
{
	iterator->slot = slot;
	iterator->start_level = start_level;
	iterator->end_level = end_level;
	iterator->start_gfn = start_gfn;
	iterator->end_gfn = end_gfn;

	rmap_walk_init_level(iterator, iterator->start_level);
}

static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{
	return !!iterator->rmap;
}

static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
	if (++iterator->rmap <= iterator->end_rmap) {
		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
		return;
	}

	if (++iterator->level > iterator->end_level) {
		iterator->rmap = NULL;
		return;
	}

	rmap_walk_init_level(iterator, iterator->level);
}

#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
	   _start_gfn, _end_gfn, _iter_)				\
	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
				 _end_level_, _start_gfn, _end_gfn);	\
	     slot_rmap_walk_okay(_iter_);				\
	     slot_rmap_walk_next(_iter_))
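
/*
 * Typical use of the walker above (a sketch mirroring kvm_handle_hva_range()
 * below):
 *
 *	for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
 *				 PT_MAX_HUGEPAGE_LEVEL,
 *				 gfn_start, gfn_end - 1, &iterator)
 *		ret |= handler(kvm, iterator.rmap, memslot,
 *			       iterator.gfn, iterator.level, data);
 */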

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				unsigned long data,
				int (*handler)(struct kvm *kvm,
					       struct kvm_rmap_head *rmap_head,
					       struct kvm_memory_slot *slot,
					       gfn_t gfn,
					       int level,
					       unsigned long data))
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct slot_rmap_walk_iterator iterator;
	int ret = 0;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			unsigned long hva_start, hva_end;
			gfn_t gfn_start, gfn_end;

			hva_start = max(start, memslot->userspace_addr);
			hva_end = min(end, memslot->userspace_addr +
				      (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;
			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

			for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
						 PT_MAX_HUGEPAGE_LEVEL,
						 gfn_start, gfn_end - 1,
						 &iterator)
				ret |= handler(kvm, iterator.rmap, memslot,
					       iterator.gfn, iterator.level, data);
		}
	}

	return ret;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  unsigned long data,
			  int (*handler)(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head,
					 struct kvm_memory_slot *slot,
					 gfn_t gfn, int level,
					 unsigned long data))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}

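/*
 * The kvm_*_hva* functions below are the x86 implementations of the
 * architecture hooks invoked from KVM's generic mmu_notifier callbacks;
 * each one simply funnels the affected hva range into the rmap handlers
 * defined above.
 */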
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
			 unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator uninitialized_var(iter);
	int young = 0;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		young |= mmu_spte_age(sptep);

	trace_kvm_age_page(gfn, level, slot, young);
	return young;
}

static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			      struct kvm_memory_slot *slot, gfn_t gfn,
			      int level, unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (is_accessed_spte(*sptep))
			return 1;
	return 0;
}
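
/*
 * If a single gfn accumulates more than RMAP_RECYCLE_THRESHOLD sptes,
 * rmap_recycle() zaps them all and flushes remote TLBs instead of letting
 * the rmap chain keep growing.
 */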

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_rmap_head *rmap_head;
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));

	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);

	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

/*
 * This value is the sum of all of the kvm instances' kvm->arch.n_used_mmu_pages
 * values.  We need a global, aggregate version in order to make the slab
 * shrinker faster.
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
	hlist_del(&sp->hash_link);
	list_del(&sp->link);
	free_page((unsigned long)sp->spt);
	if (!sp->role.direct)
		free_page((unsigned long)sp->gfns);
	kmem_cache_free(mmu_page_header_cache, sp);
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	if (!parent_pte)
		return;

	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	__pte_list_remove(parent_pte, &sp->parent_ptes);
}

static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
	mmu_spte_clear_no_track(parent_pte);
}
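
/*
 * Allocate a new shadow page from the per-vCPU memory caches (topped up
 * before mmu_lock is taken).  Direct pages do not shadow a guest page
 * table and therefore get no gfns[] array.
 */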

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
	if (!direct)
		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	/*
	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
	 * depends on valid pages being added to the head of the list.  See
	 * comments in kvm_zap_obsolete_pages().
	 */
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
}

static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
		mark_unsync(sptep);
	}
}

static void mark_unsync(u64 *spte)
{
	struct kvm_mmu_page *sp;
	unsigned int index;

	sp = page_header(__pa(spte));
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
		return;
	if (sp->unsync_children++)
		return;
	kvm_mmu_mark_parents_unsync(sp);
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 0;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
{
}

static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp, u64 *spte,
				 const void *pte)
{
	WARN_ON(1);
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i=0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
	--sp->unsync_children;
	WARN_ON((int)sp->unsync_children < 0);
	__clear_bit(idx, sp->unsync_child_bitmap);
}
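
/*
 * Depth-first walk of a shadow page's unsync_child_bitmap that collects
 * every reachable unsync leaf page into @pvec.  Returns the number of
 * unsync leaves found, or -ENOSPC once the fixed-size vector overflows.
 */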

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
			clear_unsync_child_bit(sp, i);
			continue;
		}

		child = page_header(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret) {
				clear_unsync_child_bit(sp, i);
				continue;
			} else if (ret > 0) {
				nr_unsync_leaf += ret;
			} else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			clear_unsync_child_bit(sp, i);
	}

	return nr_unsync_leaf;
}

#define INVALID_INDEX (-1)

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	pvec->nr = 0;
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, INVALID_INDEX);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);


#define for_each_valid_sp(_kvm, _sp, _gfn)				\
	hlist_for_each_entry(_sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
		if (is_obsolete_sp((_kvm), (_sp))) {			\
		} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_valid_sp(_kvm, _sp, _gfn)				\
		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
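
/*
 * Example (a sketch of kvm_mmu_unprotect_page() further below): zapping
 * every indirect shadow page that maps a given gfn,
 *
 *	for_each_gfn_indirect_valid_sp(kvm, sp, gfn)
 *		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 */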
2273

2274 2275 2276 2277 2278
static inline bool is_ept_sp(struct kvm_mmu_page *sp)
{
	return sp->role.cr0_wp && sp->role.smap_andnot_wp;
}

2279
/* @sp->gfn should be write-protected at the call site */
2280 2281
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    struct list_head *invalid_list)
2282
{
2283 2284
	if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
	    vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
2285
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2286
		return false;
2287 2288
	}

2289
	return true;
2290 2291
}

2292 2293 2294 2295
static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
					struct list_head *invalid_list,
					bool remote_flush)
{
2296
	if (!remote_flush && list_empty(invalid_list))
2297 2298 2299 2300 2301 2302 2303 2304 2305
		return false;

	if (!list_empty(invalid_list))
		kvm_mmu_commit_zap_page(kvm, invalid_list);
	else
		kvm_flush_remote_tlbs(kvm);
	return true;
}

2306 2307 2308
static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
				 struct list_head *invalid_list,
				 bool remote_flush, bool local_flush)
2309
{
2310
	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
2311
		return;
2312

2313
	if (local_flush)
2314
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2315 2316
}

2317 2318 2319 2320 2321 2322 2323
#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

2324 2325
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
2326 2327
	return sp->role.invalid ||
	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2328 2329
}

2330
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2331
			 struct list_head *invalid_list)
2332
{
2333 2334
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	return __kvm_sync_page(vcpu, sp, invalid_list);
2335 2336
}

2337
/* @gfn should be write-protected at the call site */
2338 2339
static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
			   struct list_head *invalid_list)
2340 2341
{
	struct kvm_mmu_page *s;
2342
	bool ret = false;
2343

2344
	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
2345
		if (!s->unsync)
2346 2347 2348
			continue;

		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
2349
		ret |= kvm_sync_page(vcpu, s, invalid_list);
2350 2351
	}

2352
	return ret;
2353 2354
}
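
/*
 * mmu_page_path records, for each level, which parent shadow page and which
 * index inside it led to the child currently being visited, so that
 * mmu_pages_clear_parents() can clear unsync_child bits on the way back up.
 */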

struct mmu_page_path {
2356 2357
	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
	unsigned int idx[PT64_ROOT_MAX_LEVEL];
2358 2359
};

2360
#define for_each_sp(pvec, sp, parents, i)			\
P
Paolo Bonzini 已提交
2361
		for (i = mmu_pages_first(&pvec, &parents);	\
2362 2363 2364
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

2365 2366 2367
static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
2368 2369 2370 2371 2372
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;
P
Paolo Bonzini 已提交
2373 2374
		unsigned idx = pvec->page[n].idx;
		int level = sp->role.level;
2375

P
Paolo Bonzini 已提交
2376 2377 2378
		parents->idx[level-1] = idx;
		if (level == PT_PAGE_TABLE_LEVEL)
			break;
2379

P
Paolo Bonzini 已提交
2380
		parents->parent[level-2] = sp;
2381 2382 2383 2384 2385
	}

	return n;
}

P
Paolo Bonzini 已提交
2386 2387 2388 2389 2390 2391 2392 2393 2394
static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	int level;

	if (pvec->nr == 0)
		return 0;

2395 2396
	WARN_ON(pvec->page[0].idx != INVALID_INDEX);

P
Paolo Bonzini 已提交
2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409
	sp = pvec->page[0].sp;
	level = sp->role.level;
	WARN_ON(level == PT_PAGE_TABLE_LEVEL);

	parents->parent[level-2] = sp;

	/* Also set up a sentinel.  Further entries in pvec are all
	 * children of sp, so this element is never overwritten.
	 */
	parents->parent[level-1] = NULL;
	return mmu_pages_next(pvec, parents, 0);
}

2410
static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2411
{
2412 2413 2414 2415 2416 2417 2418 2419 2420
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];
		sp = parents->parent[level];
		if (!sp)
			return;

2421
		WARN_ON(idx == INVALID_INDEX);
2422
		clear_unsync_child_bit(sp, idx);
2423
		level++;
P
Paolo Bonzini 已提交
2424
	} while (!sp->unsync_children);
2425
}
2426

2427 2428 2429 2430 2431 2432 2433
static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
2434
	LIST_HEAD(invalid_list);
2435
	bool flush = false;
2436 2437

	while (mmu_unsync_walk(parent, &pages)) {
2438
		bool protected = false;
2439 2440

		for_each_sp(pages, sp, parents, i)
2441
			protected |= rmap_write_protect(vcpu, sp->gfn);
2442

2443
		if (protected) {
2444
			kvm_flush_remote_tlbs(vcpu->kvm);
2445 2446
			flush = false;
		}
2447

2448
		for_each_sp(pages, sp, parents, i) {
2449
			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
2450 2451
			mmu_pages_clear_parents(&parents);
		}
2452 2453 2454 2455 2456
		if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
			cond_resched_lock(&vcpu->kvm->mmu_lock);
			flush = false;
		}
2457
	}
2458 2459

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2460 2461
}

2462 2463
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
2464
	atomic_set(&sp->write_flooding_count,  0);
2465 2466 2467 2468 2469 2470 2471 2472 2473
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	struct kvm_mmu_page *sp =  page_header(__pa(spte));

	__clear_sp_write_flooding_count(sp);
}

2474 2475 2476 2477
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
2478
					     int direct,
2479
					     unsigned int access)
2480 2481 2482
{
	union kvm_mmu_page_role role;
	unsigned quadrant;
2483 2484
	struct kvm_mmu_page *sp;
	bool need_sync = false;
2485
	bool flush = false;
2486
	int collisions = 0;
2487
	LIST_HEAD(invalid_list);
2488

2489
	role = vcpu->arch.mmu->mmu_role.base;
2490
	role.level = level;
2491
	role.direct = direct;
2492
	if (role.direct)
2493
		role.gpte_is_8_bytes = true;
2494
	role.access = access;
2495 2496
	if (!vcpu->arch.mmu->direct_map
	    && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2497 2498 2499 2500
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
2501 2502 2503 2504 2505 2506
	for_each_valid_sp(vcpu->kvm, sp, gfn) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
		}

2507 2508
		if (!need_sync && sp->unsync)
			need_sync = true;
2509

2510 2511
		if (sp->role.word != role.word)
			continue;
2512

2513 2514 2515 2516 2517 2518 2519 2520 2521 2522
		if (sp->unsync) {
			/* The page is good, but __kvm_sync_page might still end
			 * up zapping it.  If so, break in order to rebuild it.
			 */
			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
				break;

			WARN_ON(!list_empty(&invalid_list));
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}
2523

2524
		if (sp->unsync_children)
2525
			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2526

2527
		__clear_sp_write_flooding_count(sp);
2528
		trace_kvm_mmu_get_page(sp, false);
2529
		goto out;
2530
	}
2531

A
Avi Kivity 已提交
2532
	++vcpu->kvm->stat.mmu_cache_miss;
2533 2534 2535

	sp = kvm_mmu_alloc_page(vcpu, direct);

2536 2537
	sp->gfn = gfn;
	sp->role = role;
2538 2539
	hlist_add_head(&sp->hash_link,
		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
2540
	if (!direct) {
2541 2542 2543 2544 2545 2546 2547 2548
		/*
		 * we should do write protection before syncing pages
		 * otherwise the content of the synced shadow page may
		 * be inconsistent with guest page table.
		 */
		account_shadowed(vcpu->kvm, sp);
		if (level == PT_PAGE_TABLE_LEVEL &&
		      rmap_write_protect(vcpu, gfn))
2549
			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2550 2551

		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
2552
			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
2553
	}
2554
	clear_page(sp->spt);
A
Avi Kivity 已提交
2555
	trace_kvm_mmu_get_page(sp, true);
2556 2557

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2558 2559 2560
out:
	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2561
	return sp;
2562 2563
}

2564 2565 2566
static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
2567 2568
{
	iterator->addr = addr;
2569
	iterator->shadow_addr = root;
2570
	iterator->level = vcpu->arch.mmu->shadow_root_level;
2571

2572
	if (iterator->level == PT64_ROOT_4LEVEL &&
2573 2574
	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
	    !vcpu->arch.mmu->direct_map)
2575 2576
		--iterator->level;

2577
	if (iterator->level == PT32E_ROOT_LEVEL) {
2578 2579 2580 2581
		/*
		 * prev_root is currently only used for 64-bit hosts. So only
		 * the active root_hpa is valid here.
		 */
2582
		BUG_ON(root != vcpu->arch.mmu->root_hpa);
2583

2584
		iterator->shadow_addr
2585
			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2586 2587 2588 2589 2590 2591 2592
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

2593 2594 2595
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
2596
	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
2597 2598 2599
				    addr);
}

2600 2601 2602 2603
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PT_PAGE_TABLE_LEVEL)
		return false;
2604

2605 2606 2607 2608 2609
	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

2610 2611
static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
2612
{
2613
	if (is_last_spte(spte, iterator->level)) {
2614 2615 2616 2617
		iterator->level = 0;
		return;
	}

2618
	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2619 2620 2621
	--iterator->level;
}

2622 2623
static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
2624
	__shadow_walk_next(iterator, *iterator->sptep);
2625 2626
}

2627 2628
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
2629 2630 2631
{
	u64 spte;

2632
	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2633

2634
	spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
2635
	       shadow_user_mask | shadow_x_mask | shadow_me_mask;
2636 2637

	if (sp_ad_disabled(sp))
2638
		spte |= SPTE_AD_DISABLED_MASK;
2639 2640
	else
		spte |= shadow_accessed_mask;
X
Xiao Guangrong 已提交
2641

2642
	mmu_spte_set(sptep, spte);
2643 2644 2645 2646 2647

	mmu_page_add_parent_pte(vcpu, sp, sptep);

	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);
2648 2649
}

2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				   unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it will corrupt the
		 * sp's access: allow writable in the read-only sp,
		 * so we should update the spte at this point to get
		 * a new sp with the correct access.
		 */
		child = page_header(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

2667
		drop_parent_pte(child, sptep);
2668
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
2669 2670 2671
	}
}

X
Xiao Guangrong 已提交
2672
static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2673 2674 2675 2676 2677 2678 2679
			     u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
X
Xiao Guangrong 已提交
2680
		if (is_last_spte(pte, sp->role.level)) {
2681
			drop_spte(kvm, spte);
X
Xiao Guangrong 已提交
2682 2683 2684
			if (is_large_pte(pte))
				--kvm->stat.lpages;
		} else {
2685
			child = page_header(pte & PT64_BASE_ADDR_MASK);
2686
			drop_parent_pte(child, spte);
2687
		}
X
Xiao Guangrong 已提交
2688 2689 2690 2691
		return true;
	}

	if (is_mmio_spte(pte))
2692
		mmu_spte_clear_no_track(spte);
2693

X
Xiao Guangrong 已提交
2694
	return false;
2695 2696
}

2697
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
2698
					 struct kvm_mmu_page *sp)
2699
{
2700 2701
	unsigned i;

2702 2703
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		mmu_page_zap_pte(kvm, sp, sp->spt + i);
2704 2705
}

2706
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2707
{
2708 2709
	u64 *sptep;
	struct rmap_iterator iter;
2710

2711
	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2712
		drop_parent_pte(sp, sptep);
2713 2714
}

2715
static int mmu_zap_unsync_children(struct kvm *kvm,
2716 2717
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
2718
{
2719 2720 2721
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
2722

2723
	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
2724
		return 0;
2725 2726 2727 2728 2729

	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
2730
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2731
			mmu_pages_clear_parents(&parents);
2732
			zapped++;
2733 2734 2735 2736
		}
	}

	return zapped;
2737 2738
}

2739 2740 2741 2742
static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
				       struct kvm_mmu_page *sp,
				       struct list_head *invalid_list,
				       int *nr_zapped)
2743
{
2744
	bool list_unstable;
A
Avi Kivity 已提交
2745

2746
	trace_kvm_mmu_prepare_zap_page(sp);
2747
	++kvm->stat.mmu_shadow_zapped;
2748
	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
2749
	kvm_mmu_page_unlink_children(kvm, sp);
2750
	kvm_mmu_unlink_parents(kvm, sp);
2751

2752 2753 2754
	/* Zapping children means active_mmu_pages has become unstable. */
	list_unstable = *nr_zapped;

2755
	if (!sp->role.invalid && !sp->role.direct)
2756
		unaccount_shadowed(kvm, sp);
2757

2758 2759
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
2760
	if (!sp->root_count) {
2761
		/* Count self */
2762
		(*nr_zapped)++;
2763
		list_move(&sp->link, invalid_list);
2764
		kvm_mod_used_mmu_pages(kvm, -1);
2765
	} else {
A
Avi Kivity 已提交
2766
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
2767

2768 2769 2770 2771 2772 2773
		/*
		 * Obsolete pages cannot be used on any vCPUs, see the comment
		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
		 * treats invalid shadow pages as being obsolete.
		 */
		if (!is_obsolete_sp(kvm, sp))
2774
			kvm_reload_remote_mmus(kvm);
2775
	}
2776

P
Paolo Bonzini 已提交
2777 2778 2779
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

2780
	sp->role.invalid = 1;
2781 2782 2783 2784 2785 2786 2787 2788 2789 2790
	return list_unstable;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list)
{
	int nr_zapped;

	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
	return nr_zapped;
2791 2792
}

2793 2794 2795
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
2796
	struct kvm_mmu_page *sp, *nsp;
2797 2798 2799 2800

	if (list_empty(invalid_list))
		return;

2801
	/*
2802 2803 2804 2805 2806 2807 2808
	 * We need to make sure everyone sees our modifications to
	 * the page tables and see changes to vcpu->mode here. The barrier
	 * in the kvm_flush_remote_tlbs() achieves this. This pairs
	 * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
	 *
	 * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
	 * guest mode and/or lockless shadow page table walks.
2809 2810
	 */
	kvm_flush_remote_tlbs(kvm);
2811

2812
	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2813
		WARN_ON(!sp->role.invalid || sp->root_count);
2814
		kvm_mmu_free_page(sp);
2815
	}
2816 2817
}

2818 2819 2820 2821 2822 2823 2824 2825
static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
					struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return false;

G
Geliang Tang 已提交
2826 2827
	sp = list_last_entry(&kvm->arch.active_mmu_pages,
			     struct kvm_mmu_page, link);
2828
	return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2829 2830
}

2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850
static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
	LIST_HEAD(invalid_list);

	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
		return 0;

	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
			break;

		++vcpu->kvm->stat.mmu_recycled;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

	if (!kvm_mmu_available_pages(vcpu->kvm))
		return -ENOSPC;
	return 0;
}

2851 2852
/*
 * Changing the number of mmu pages allocated to the vm
2853
 * Note: if goal_nr_mmu_pages is too small, you will get dead lock
2854
 */
2855
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
2856
{
2857
	LIST_HEAD(invalid_list);
2858

2859 2860
	spin_lock(&kvm->mmu_lock);

2861
	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2862 2863 2864 2865
		/* Need to free some mmu pages to achieve the goal. */
		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
				break;
2866

2867
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
2868
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2869 2870
	}

2871
	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2872 2873

	spin_unlock(&kvm->mmu_lock);
2874 2875
}

2876
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2877
{
2878
	struct kvm_mmu_page *sp;
2879
	LIST_HEAD(invalid_list);
2880 2881
	int r;

2882
	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2883
	r = 0;
2884
	spin_lock(&kvm->mmu_lock);
2885
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2886
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2887 2888
			 sp->role.word);
		r = 1;
2889
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2890
	}
2891
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
2892 2893
	spin_unlock(&kvm->mmu_lock);

2894
	return r;
2895
}
2896
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
2897

2898
static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2899 2900 2901 2902 2903 2904 2905 2906
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

2907 2908
static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				   bool can_unsync)
2909
{
2910
	struct kvm_mmu_page *sp;
2911

2912 2913
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;
2914

2915
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2916
		if (!can_unsync)
2917
			return true;
2918

2919 2920
		if (sp->unsync)
			continue;
2921

2922 2923
		WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
		kvm_unsync_page(vcpu, sp);
2924
	}
2925

2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964
	/*
	 * We need to ensure that the marking of unsync pages is visible
	 * before the SPTE is updated to allow writes because
	 * kvm_mmu_sync_roots() checks the unsync flags without holding
	 * the MMU lock and so can race with this. If the SPTE was updated
	 * before the page had been marked as unsync-ed, something like the
	 * following could happen:
	 *
	 * CPU 1                    CPU 2
	 * ---------------------------------------------------------------------
	 * 1.2 Host updates SPTE
	 *     to be writable
	 *                      2.1 Guest writes a GPTE for GVA X.
	 *                          (GPTE being in the guest page table shadowed
	 *                           by the SP from CPU 1.)
	 *                          This reads SPTE during the page table walk.
	 *                          Since SPTE.W is read as 1, there is no
	 *                          fault.
	 *
	 *                      2.2 Guest issues TLB flush.
	 *                          That causes a VM Exit.
	 *
	 *                      2.3 kvm_mmu_sync_pages() reads sp->unsync.
	 *                          Since it is false, so it just returns.
	 *
	 *                      2.4 Guest accesses GVA X.
	 *                          Since the mapping in the SP was not updated,
	 *                          so the old mapping for GVA X incorrectly
	 *                          gets used.
	 * 1.1 Host marks SP
	 *     as unsync
	 *     (sp->unsync = true)
	 *
	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
	 * pairs with this write barrier.
	 */
	smp_wmb();

2965
	return false;
2966 2967
}

D
Dan Williams 已提交
2968
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
2969 2970
{
	if (pfn_valid(pfn))
2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not for MMIO, and can be mapped
			 * with cached memory type for better performance.
			 * However, the above check misconceives those pages
			 * as MMIO, and results in KVM mapping them with UC
			 * memory type, which would hurt the performance.
			 * Therefore, we check the host memory type in addition
			 * and only treat UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
2983

2984 2985 2986
	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
2987 2988
}

2989 2990 2991 2992
/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)

A
Avi Kivity 已提交
2993
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2994
		    unsigned int pte_access, int level,
D
Dan Williams 已提交
2995
		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2996
		    bool can_unsync, bool host_writable)
2997
{
2998
	u64 spte = 0;
M
Marcelo Tosatti 已提交
2999
	int ret = 0;
3000
	struct kvm_mmu_page *sp;
S
Sheng Yang 已提交
3001

3002
	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
3003 3004
		return 0;

3005 3006
	sp = page_header(__pa(sptep));
	if (sp_ad_disabled(sp))
3007
		spte |= SPTE_AD_DISABLED_MASK;
3008 3009
	else if (kvm_vcpu_ad_need_write_protect(vcpu))
		spte |= SPTE_AD_WRPROT_ONLY_MASK;
3010

3011 3012 3013 3014 3015 3016
	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
3017
	spte |= shadow_present_mask;
3018
	if (!speculative)
3019
		spte |= spte_shadow_accessed_mask(spte);
3020

P
Paolo Bonzini 已提交
3021 3022 3023 3024 3025
	if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled()) {
		pte_access &= ~ACC_EXEC_MASK;
	}

S
Sheng Yang 已提交
3026 3027 3028 3029
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
3030

3031
	if (pte_access & ACC_USER_MASK)
S
Sheng Yang 已提交
3032
		spte |= shadow_user_mask;
3033

3034
	if (level > PT_PAGE_TABLE_LEVEL)
M
Marcelo Tosatti 已提交
3035
		spte |= PT_PAGE_SIZE_MASK;
3036
	if (tdp_enabled)
3037
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
3038
			kvm_is_mmio_pfn(pfn));
3039

3040
	if (host_writable)
3041
		spte |= SPTE_HOST_WRITEABLE;
3042 3043
	else
		pte_access &= ~ACC_WRITE_MASK;
3044

3045 3046 3047
	if (!kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_mask;

3048
	spte |= (u64)pfn << PAGE_SHIFT;
3049

3050
	if (pte_access & ACC_WRITE_MASK) {
3051
		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
3052

3053 3054 3055 3056 3057 3058
		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
3059
		if (!can_unsync && is_writable_pte(*sptep))
3060 3061
			goto set_pte;

3062
		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
3063
			pgprintk("%s: found shadow page for %llx, marking ro\n",
3064
				 __func__, gfn);
3065
			ret |= SET_SPTE_WRITE_PROTECTED_PT;
3066
			pte_access &= ~ACC_WRITE_MASK;
3067
			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
3068 3069 3070
		}
	}

3071
	if (pte_access & ACC_WRITE_MASK) {
3072
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
3073
		spte |= spte_shadow_dirty_mask(spte);
3074
	}
3075

3076 3077 3078
	if (speculative)
		spte = mark_spte_for_access_track(spte);

3079
set_pte:
3080
	if (mmu_spte_update(sptep, spte))
3081
		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
M
Marcelo Tosatti 已提交
3082 3083 3084
	return ret;
}

3085 3086 3087 3088
static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			unsigned int pte_access, int write_fault, int level,
			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
			bool host_writable)
M
Marcelo Tosatti 已提交
3089 3090
{
	int was_rmapped = 0;
3091
	int rmap_count;
3092
	int set_spte_ret;
3093
	int ret = RET_PF_RETRY;
3094
	bool flush = false;
M
Marcelo Tosatti 已提交
3095

3096 3097
	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);
M
Marcelo Tosatti 已提交
3098

3099
	if (is_shadow_present_pte(*sptep)) {
M
Marcelo Tosatti 已提交
3100 3101 3102 3103
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
3104 3105
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
M
Marcelo Tosatti 已提交
3106
			struct kvm_mmu_page *child;
A
Avi Kivity 已提交
3107
			u64 pte = *sptep;
M
Marcelo Tosatti 已提交
3108 3109

			child = page_header(pte & PT64_BASE_ADDR_MASK);
3110
			drop_parent_pte(child, sptep);
3111
			flush = true;
A
Avi Kivity 已提交
3112
		} else if (pfn != spte_to_pfn(*sptep)) {
3113
			pgprintk("hfn old %llx new %llx\n",
A
Avi Kivity 已提交
3114
				 spte_to_pfn(*sptep), pfn);
3115
			drop_spte(vcpu->kvm, sptep);
3116
			flush = true;
3117 3118
		} else
			was_rmapped = 1;
M
Marcelo Tosatti 已提交
3119
	}
3120

3121 3122 3123
	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
				speculative, true, host_writable);
	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
M
Marcelo Tosatti 已提交
3124
		if (write_fault)
3125
			ret = RET_PF_EMULATE;
3126
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3127
	}
3128

3129
	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
3130 3131
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
				KVM_PAGES_PER_HPAGE(level));
M
Marcelo Tosatti 已提交
3132

3133
	if (unlikely(is_mmio_spte(*sptep)))
3134
		ret = RET_PF_EMULATE;
3135

A
Avi Kivity 已提交
3136
	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
3137
	trace_kvm_mmu_set_spte(level, gfn, sptep);
A
Avi Kivity 已提交
3138
	if (!was_rmapped && is_large_pte(*sptep))
M
Marcelo Tosatti 已提交
3139 3140
		++vcpu->kvm->stat.lpages;

3141 3142 3143 3144 3145 3146
	if (is_shadow_present_pte(*sptep)) {
		if (!was_rmapped) {
			rmap_count = rmap_add(vcpu, sptep, gfn);
			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
				rmap_recycle(vcpu, sptep, gfn);
		}
3147
	}
3148

3149
	return ret;
3150 3151
}

D
Dan Williams 已提交
3152
static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
3153 3154 3155 3156
				     bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

3157
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
3158
	if (!slot)
3159
		return KVM_PFN_ERR_FAULT;
3160

3161
	return gfn_to_pfn_memslot_atomic(slot, gfn);
3162 3163 3164 3165 3166 3167 3168
}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
3169
	struct kvm_memory_slot *slot;
3170
	unsigned int access = sp->role.access;
3171 3172 3173 3174
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
3175 3176
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
	if (!slot)
3177 3178
		return -1;

3179
	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
3180 3181 3182
	if (ret <= 0)
		return -1;

3183
	for (i = 0; i < ret; i++, gfn++, start++) {
3184 3185
		mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);
3186 3187
		put_page(pages[i]);
	}
3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203

	return 0;
}

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
3204
		if (is_shadow_present_pte(*spte) || spte == sptep) {
3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				break;
			start = NULL;
		} else if (!start)
			start = spte;
	}
}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

3219 3220
	sp = page_header(__pa(sptep));

3221
	/*
3222 3223 3224
	 * Without accessed bits, there's no way to distinguish between
	 * actually accessed translations and prefetched, so disable pte
	 * prefetch if accessed bits aren't available.
3225
	 */
3226
	if (sp_ad_disabled(sp))
3227 3228 3229 3230 3231 3232 3233 3234
		return;

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}

3235
static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
3236
				  kvm_pfn_t pfn, struct kvm_memory_slot *slot)
3237 3238 3239 3240 3241 3242 3243 3244 3245
{
	unsigned long hva;
	pte_t *pte;
	int level;

	BUILD_BUG_ON(PT_PAGE_TABLE_LEVEL != (int)PG_LEVEL_4K ||
		     PT_DIRECTORY_LEVEL != (int)PG_LEVEL_2M ||
		     PT_PDPE_LEVEL != (int)PG_LEVEL_1G);

3246
	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
3247 3248
		return PT_PAGE_TABLE_LEVEL;

3249 3250 3251 3252 3253 3254 3255 3256
	/*
	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
	 * is not solely for performance, it's also necessary to avoid the
	 * "writable" check in __gfn_to_hva_many(), which will always fail on
	 * read-only memslots due to gfn_to_hva() assuming writes.  Earlier
	 * page fault steps have already verified the guest isn't writing a
	 * read-only memslot.
	 */
3257 3258 3259 3260 3261 3262 3263 3264 3265
	hva = __gfn_to_hva_memslot(slot, gfn);

	pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level);
	if (unlikely(!pte))
		return PT_PAGE_TABLE_LEVEL;

	return level;
}

3266 3267
static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
				   int max_level, kvm_pfn_t *pfnp)
3268
{
3269
	struct kvm_memory_slot *slot;
3270
	struct kvm_lpage_info *linfo;
3271
	kvm_pfn_t pfn = *pfnp;
3272
	kvm_pfn_t mask;
3273
	int level;
3274

3275
	if (unlikely(max_level == PT_PAGE_TABLE_LEVEL))
3276
		return PT_PAGE_TABLE_LEVEL;
3277

3278
	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
3279
		return PT_PAGE_TABLE_LEVEL;
3280

3281 3282 3283 3284 3285 3286
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
	if (!slot)
		return PT_PAGE_TABLE_LEVEL;

	max_level = min(max_level, kvm_x86_ops->get_lpage_level());
	for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
3287 3288
		linfo = lpage_info_slot(gfn, slot, max_level);
		if (!linfo->disallow_lpage)
3289 3290 3291 3292 3293 3294 3295
			break;
	}

	if (max_level == PT_PAGE_TABLE_LEVEL)
		return PT_PAGE_TABLE_LEVEL;

	level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
3296
	if (level == PT_PAGE_TABLE_LEVEL)
3297
		return level;
3298

3299
	level = min(level, max_level);
3300 3301

	/*
3302 3303
	 * mmu_notifier_retry() was successful and mmu_lock is held, so
	 * the pmd can't be split from under us.
3304
	 */
3305 3306 3307
	mask = KVM_PAGES_PER_HPAGE(level) - 1;
	VM_BUG_ON((gfn & mask) != (pfn & mask));
	*pfnp = pfn & ~mask;
3308 3309

	return level;
3310 3311
}

P
Paolo Bonzini 已提交
3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334
static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
				       gfn_t gfn, kvm_pfn_t *pfnp, int *levelp)
{
	int level = *levelp;
	u64 spte = *it.sptep;

	if (it.level == level && level > PT_PAGE_TABLE_LEVEL &&
	    is_nx_huge_page_enabled() &&
	    is_shadow_present_pte(spte) &&
	    !is_large_pte(spte)) {
		/*
		 * A small SPTE exists for this pfn, but FNAME(fetch)
		 * and __direct_map would like to create a large PTE
		 * instead: just force them to go down another level,
		 * patching back for them into pfn the next 9 bits of
		 * the address.
		 */
		u64 page_mask = KVM_PAGES_PER_HPAGE(level) - KVM_PAGES_PER_HPAGE(level - 1);
		*pfnp |= gfn & page_mask;
		(*levelp)--;
	}
}
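
/*
 * For direct (tdp or real-mode) faults: walk the shadow page table for @gpa,
 * allocating intermediate shadow pages as needed, and install the leaf spte
 * at the level chosen by kvm_mmu_hugepage_adjust(), demoted further if
 * disallowed_hugepage_adjust() forbids a large mapping.
 */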

static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
3336 3337
			int map_writable, int max_level, kvm_pfn_t pfn,
			bool prefault, bool account_disallowed_nx_lpage)
3338
{
3339
	struct kvm_shadow_walk_iterator it;
3340
	struct kvm_mmu_page *sp;
3341
	int level, ret;
3342 3343
	gfn_t gfn = gpa >> PAGE_SHIFT;
	gfn_t base_gfn = gfn;
A
Avi Kivity 已提交
3344

3345
	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
3346
		return RET_PF_RETRY;
3347

3348
	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn);
3349

3350
	trace_kvm_mmu_spte_requested(gpa, level, pfn);
3351
	for_each_shadow_entry(vcpu, gpa, it) {
P
Paolo Bonzini 已提交
3352 3353 3354 3355 3356 3357
		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		disallowed_hugepage_adjust(it, gfn, &pfn, &level);

3358 3359
		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
3360
			break;
A
Avi Kivity 已提交
3361

3362 3363 3364 3365
		drop_large_spte(vcpu, it.sptep);
		if (!is_shadow_present_pte(*it.sptep)) {
			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
					      it.level - 1, true, ACC_ALL);
3366

3367
			link_shadow_page(vcpu, it.sptep, sp);
3368
			if (account_disallowed_nx_lpage)
P
Paolo Bonzini 已提交
3369
				account_huge_nx_page(vcpu->kvm, sp);
3370 3371
		}
	}
3372 3373 3374 3375 3376 3377 3378

	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
			   write, level, base_gfn, pfn, prefault,
			   map_writable);
	direct_pte_prefetch(vcpu, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;
A
Avi Kivity 已提交
3379 3380
}

H
Huang Ying 已提交
3381
static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
3382
{
3383
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
3384 3385
}

D
Dan Williams 已提交
3386
static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
3387
{
X
Xiao Guangrong 已提交
3388 3389 3390 3391 3392 3393
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte otherwise read access on readonly gfn also can
	 * caused mmio page fault and treat it as mmio access.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
3394
		return RET_PF_EMULATE;
X
Xiao Guangrong 已提交
3395

3396
	if (pfn == KVM_PFN_ERR_HWPOISON) {
3397
		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
3398
		return RET_PF_RETRY;
3399
	}
3400

3401
	return -EFAULT;
3402 3403
}

3404
static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
3405 3406
				kvm_pfn_t pfn, unsigned int access,
				int *ret_val)
3407 3408
{
	/* The pfn is invalid, report the error! */
3409
	if (unlikely(is_error_pfn(pfn))) {
3410
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
3411
		return true;
3412 3413
	}

3414
	if (unlikely(is_noslot_pfn(pfn)))
3415 3416
		vcpu_cache_mmio_info(vcpu, gva, gfn,
				     access & shadow_mmio_access_mask);
3417

3418
	return false;
3419 3420
}

3421
static bool page_fault_can_be_fast(u32 error_code)
3422
{
3423 3424 3425 3426 3427 3428 3429
	/*
	 * Do not fix the mmio spte with invalid generation number which
	 * need to be updated by slow page fault path.
	 */
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

3430 3431 3432 3433 3434
	/* See if the page fault is due to an NX violation */
	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
		return false;

3435
	/*
3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446
	 * #PF can be fast if:
	 * 1. The shadow page table entry is not present, which could mean that
	 *    the fault is potentially caused by access tracking (if enabled).
	 * 2. The shadow page table entry is present and the fault
	 *    is caused by write-protect, that means we just need change the W
	 *    bit of the spte which can be done out of mmu-lock.
	 *
	 * However, if access tracking is disabled we know that a non-present
	 * page must be a genuine page fault where we have to create a new SPTE.
	 * So, if access tracking is disabled, we return true only for write
	 * accesses to a present page.
3447 3448
	 */

3449 3450 3451
	return shadow_acc_track_mask != 0 ||
	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3452 3453
}

3454 3455 3456 3457
/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
3458
static bool
3459
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3460
			u64 *sptep, u64 old_spte, u64 new_spte)
3461 3462 3463 3464 3465
{
	gfn_t gfn;

	WARN_ON(!sp->role.direct);

3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477
	/*
	 * Theoretically we could also set dirty bit (and flush TLB) here in
	 * order to eliminate unnecessary PML logging. See comments in
	 * set_spte. But fast_page_fault is very unlikely to happen with PML
	 * enabled, so we do not do this. This might result in the same GPA
	 * to be logged in PML buffer again when the write really happens, and
	 * eventually to be called by mark_page_dirty twice. But it's also no
	 * harm. This also avoids the TLB flush needed after setting dirty bit
	 * so non-PML cases won't be impacted.
	 *
	 * Compare with set_spte where instead shadow_dirty_mask is set.
	 */
3478
	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3479 3480
		return false;

3481
	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3482 3483 3484 3485 3486 3487 3488
		/*
		 * The gfn of direct spte is stable since it is
		 * calculated by sp->gfn.
		 */
		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}
3489 3490 3491 3492

	return true;
}

3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504
static bool is_access_allowed(u32 fault_err_code, u64 spte)
{
	if (fault_err_code & PFERR_FETCH_MASK)
		return is_executable_pte(spte);

	if (fault_err_code & PFERR_WRITE_MASK)
		return is_writable_pte(spte);

	/* Fault was on Read access */
	return spte & PT_PRESENT_MASK;
}

3505 3506 3507 3508 3509
/*
 * Return value:
 * - true: let the vcpu to access on the same address again.
 * - false: let the real page fault path to fix it.
 */
3510
static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
3511 3512 3513
			    u32 error_code)
{
	struct kvm_shadow_walk_iterator iterator;
3514
	struct kvm_mmu_page *sp;
3515
	bool fault_handled = false;
3516
	u64 spte = 0ull;
3517
	uint retry_count = 0;
3518

3519
	if (!page_fault_can_be_fast(error_code))
3520 3521 3522 3523
		return false;

	walk_shadow_page_lockless_begin(vcpu);

3524
	do {
3525
		u64 new_spte;
3526

3527
		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
3528
			if (!is_shadow_present_pte(spte))
3529 3530
				break;

3531 3532 3533
		sp = page_header(__pa(iterator.sptep));
		if (!is_last_spte(spte, sp->role.level))
			break;
3534

3535
		/*
3536 3537 3538 3539 3540
		 * Check whether the memory access that caused the fault would
		 * still cause it if it were to be performed right now. If not,
		 * then this is a spurious fault caused by TLB lazily flushed,
		 * or some other CPU has already fixed the PTE after the
		 * current CPU took the fault.
3541 3542 3543 3544
		 *
		 * Need not check the access of upper level table entries since
		 * they are always ACC_ALL.
		 */
3545 3546 3547 3548
		if (is_access_allowed(error_code, spte)) {
			fault_handled = true;
			break;
		}
3549

3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560
		new_spte = spte;

		if (is_access_track_spte(spte))
			new_spte = restore_acc_track_spte(new_spte);

		/*
		 * Currently, to simplify the code, write-protection can
		 * be removed in the fast path only if the SPTE was
		 * write-protected for dirty-logging or access tracking.
		 */
		if ((error_code & PFERR_WRITE_MASK) &&
3561
		    spte_can_locklessly_be_made_writable(spte)) {
3562
			new_spte |= PT_WRITABLE_MASK;
3563 3564

			/*
3565 3566 3567 3568 3569 3570 3571 3572 3573
			 * Do not fix write-permission on the large spte.  Since
			 * we only dirty the first page into the dirty-bitmap in
			 * fast_pf_fix_direct_spte(), other pages are missed
			 * if its slot has dirty logging enabled.
			 *
			 * Instead, we let the slow page fault path create a
			 * normal spte to fix the access.
			 *
			 * See the comments in kvm_arch_commit_memory_region().
			 */
			if (sp->role.level > PT_PAGE_TABLE_LEVEL)
				break;
		}

		/* Verify that the fault can be handled in the fast path */
		if (new_spte == spte ||
		    !is_access_allowed(error_code, new_spte))
			break;

		/*
		 * Currently, fast page fault only works for direct mapping
		 * since the gfn is not stable for indirect shadow page. See
		 * Documentation/virt/kvm/locking.txt to get more detail.
		 */
		fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
							iterator.sptep, spte,
							new_spte);
		if (fault_handled)
			break;

		if (++retry_count > 4) {
			printk_once(KERN_WARNING
				"kvm: Fast #PF retrying more than 4 times.\n");
			break;
		}

	} while (true);

	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
			      spte, fault_handled);
	walk_shadow_page_lockless_end(vcpu);

	return fault_handled;
}

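/*
 * Drop a reference on a root shadow page; if that was the last reference
 * and the page has been marked invalid, queue it on invalid_list for
 * zapping.  The root_hpa is then invalidated.
 */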
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(*root_hpa))
		return;

	sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);
	--sp->root_count;
	if (!sp->root_count && sp->role.invalid)
		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

	*root_hpa = INVALID_PAGE;
}

/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free)
{
	int i;
	LIST_HEAD(invalid_list);
	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;

	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);

	/* Before acquiring the MMU lock, see if we need to do any real work. */
	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
			    VALID_PAGE(mmu->prev_roots[i].hpa))
				break;

		if (i == KVM_MMU_NUM_PREV_ROOTS)
			return;
	}

	spin_lock(&vcpu->kvm->mmu_lock);

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
			mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa,
					   &invalid_list);

	if (free_active_root) {
		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
			mmu_free_root_page(vcpu->kvm, &mmu->root_hpa,
					   &invalid_list);
		} else {
			for (i = 0; i < 4; ++i)
				if (mmu->pae_root[i] != 0)
					mmu_free_root_page(vcpu->kvm,
							   &mmu->pae_root[i],
							   &invalid_list);
			mmu->root_hpa = INVALID_PAGE;
		}
		mmu->root_cr3 = 0;
	}

	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	spin_unlock(&vcpu->kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);

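/*
 * Returns 1 (and requests a triple fault) if the guest's root gfn is not
 * backed by a visible memslot.
 */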
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		ret = 1;
	}

	return ret;
}

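/*
 * Allocate the root page(s) for a direct-map MMU: a single root for
 * 4/5-level paging, or four PAE roots for a 3-level shadow.
 */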
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	unsigned i;

	if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
		spin_lock(&vcpu->kvm->mmu_lock);
		if (make_mmu_pages_available(vcpu) < 0) {
			spin_unlock(&vcpu->kvm->mmu_lock);
			return -ENOSPC;
		}
		sp = kvm_mmu_get_page(vcpu, 0, 0,
				vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu->root_hpa = __pa(sp->spt);
	} else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			hpa_t root = vcpu->arch.mmu->pae_root[i];

			MMU_WARN_ON(VALID_PAGE(root));
			spin_lock(&vcpu->kvm->mmu_lock);
			if (make_mmu_pages_available(vcpu) < 0) {
				spin_unlock(&vcpu->kvm->mmu_lock);
				return -ENOSPC;
			}
			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
					i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
			root = __pa(sp->spt);
			++sp->root_count;
			spin_unlock(&vcpu->kvm->mmu_lock);
			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
		}
		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
	} else
		BUG();

	/* root_cr3 is ignored for direct MMUs. */
	vcpu->arch.mmu->root_cr3 = 0;

	return 0;
}

static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	u64 pdptr, pm_mask;
	gfn_t root_gfn, root_cr3;
	int i;

	root_cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
	root_gfn = root_cr3 >> PAGE_SHIFT;

	if (mmu_check_root(vcpu, root_gfn))
		return 1;

	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		hpa_t root = vcpu->arch.mmu->root_hpa;

		MMU_WARN_ON(VALID_PAGE(root));

		spin_lock(&vcpu->kvm->mmu_lock);
		if (make_mmu_pages_available(vcpu) < 0) {
			spin_unlock(&vcpu->kvm->mmu_lock);
			return -ENOSPC;
		}
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
		root = __pa(sp->spt);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu->root_hpa = root;
		goto set_root_cr3;
	}

	/*
	 * We shadow a 32 bit page table. This may be a legacy 2-level
	 * or a PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
	 */
	pm_mask = PT_PRESENT_MASK;
	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu->pae_root[i];

		MMU_WARN_ON(VALID_PAGE(root));
		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
			if (!(pdptr & PT_PRESENT_MASK)) {
				vcpu->arch.mmu->pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
			if (mmu_check_root(vcpu, root_gfn))
				return 1;
		}
		spin_lock(&vcpu->kvm->mmu_lock);
		if (make_mmu_pages_available(vcpu) < 0) {
			spin_unlock(&vcpu->kvm->mmu_lock);
			return -ENOSPC;
		}
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
				      0, ACC_ALL);
		root = __pa(sp->spt);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);

		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
	}
	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);

	/*
	 * If we shadow a 32 bit page table with a long mode page
	 * table we enter this path.
	 */
	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
		if (vcpu->arch.mmu->lm_root == NULL) {
			/*
			 * The additional page necessary for this is only
			 * allocated on demand.
			 */

			u64 *lm_root;

			lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (lm_root == NULL)
				return 1;

			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;

			vcpu->arch.mmu->lm_root = lm_root;
		}

		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
	}

set_root_cr3:
	vcpu->arch.mmu->root_cr3 = root_cr3;

	return 0;
}

static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.mmu->direct_map)
		return mmu_alloc_direct_roots(vcpu);
	else
		return mmu_alloc_shadow_roots(vcpu);
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (vcpu->arch.mmu->direct_map)
		return;

	if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
		return;

	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);

	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		hpa_t root = vcpu->arch.mmu->root_hpa;
		sp = page_header(root);

		/*
		 * Even if another CPU was marking the SP as unsync-ed
		 * simultaneously, any guest page table changes are not
		 * guaranteed to be visible anyway until this VCPU issues a TLB
		 * flush strictly after those changes are made. We only need to
		 * ensure that the other CPU sets these flags before any actual
		 * changes to the page tables are made. The comments in
		 * mmu_need_write_protect() describe what could go wrong if this
		 * requirement isn't satisfied.
		 */
		if (!smp_load_acquire(&sp->unsync) &&
		    !smp_load_acquire(&sp->unsync_children))
			return;

		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

		mmu_sync_children(vcpu, sp);

		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu->pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}

	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
	spin_unlock(&vcpu->kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
				  u32 access, struct x86_exception *exception)
{
	if (exception)
		exception->error_code = 0;
	return vaddr;
}

static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
					 u32 access,
					 struct x86_exception *exception)
{
	if (exception)
		exception->error_code = 0;
	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
}

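/*
 * Check a guest PTE against the precomputed reserved-bit masks; bit 7
 * (the large-page bit) selects which mask applies at this level.
 */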
static bool
__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
{
	int bit7 = (pte >> 7) & 1;

	return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
}

static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
{
	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
}

static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	/*
	 * A nested guest cannot use the MMIO cache if it is using nested
	 * page tables, because cr2 is a nGPA while the cache stores GPAs.
	 */
	if (mmu_is_nested(vcpu))
		return false;

	if (direct)
		return vcpu_match_mmio_gpa(vcpu, addr);

	return vcpu_match_mmio_gva(vcpu, addr);
}

/* return true if reserved bit is detected on spte. */
static bool
walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
	struct rsvd_bits_validate *rsvd_check;
	int root, leaf;
	bool reserved = false;

	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;

	walk_shadow_page_lockless_begin(vcpu);

	for (shadow_walk_init(&iterator, vcpu, addr),
		 leaf = root = iterator.level;
	     shadow_walk_okay(&iterator);
	     __shadow_walk_next(&iterator, spte)) {
		spte = mmu_spte_get_lockless(iterator.sptep);

		sptes[leaf - 1] = spte;
		leaf--;

		if (!is_shadow_present_pte(spte))
			break;

		/*
		 * Use a bitwise-OR instead of a logical-OR to aggregate the
		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
		 * adding a Jcc in the loop.
		 */
		reserved |= __is_bad_mt_xwr(rsvd_check, spte) |
			    __is_rsvd_bits_set(rsvd_check, spte, iterator.level);
	}

	walk_shadow_page_lockless_end(vcpu);

	if (reserved) {
		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
		       __func__, addr);
		while (root > leaf) {
			pr_err("------ spte 0x%llx level %d.\n",
			       sptes[root - 1], root);
			root--;
		}
	}

	*sptep = spte;
	return reserved;
}

static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
	u64 spte;
	bool reserved;

	if (mmio_info_in_cache(vcpu, addr, direct))
		return RET_PF_EMULATE;

	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
	if (WARN_ON(reserved))
		return -EINVAL;

	if (is_mmio_spte(spte)) {
		gfn_t gfn = get_mmio_spte_gfn(spte);
		unsigned int access = get_mmio_spte_access(spte);

		if (!check_mmio_spte(vcpu, spte))
			return RET_PF_INVALID;

		if (direct)
			addr = 0;

		trace_handle_mmio_page_fault(addr, gfn, access);
		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
		return RET_PF_EMULATE;
	}

	/*
	 * If the page table is zapped by other cpus, let CPU fault again on
	 * the address.
	 */
	return RET_PF_RETRY;
}

static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
					 u32 error_code, gfn_t gfn)
{
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	if (!(error_code & PFERR_PRESENT_MASK) ||
	      !(error_code & PFERR_WRITE_MASK))
		return false;

	/*
	 * guest is writing the page which is write tracked which can
	 * not be fixed by page fault handler.
	 */
	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;

	return false;
}

static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		clear_sp_write_flooding_count(iterator.sptep);
		if (!is_shadow_present_pte(spte))
			break;
	}
	walk_shadow_page_lockless_end(vcpu);
}

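/*
 * Stash the faulting gfn, the current paging mode and the guest CR3 so
 * the fault can be completed once the page has been brought in
 * asynchronously.
 */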
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				   gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
	arch.gfn = gfn;
	arch.direct_map = vcpu->arch.mmu->direct_map;
	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);

	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}

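/*
 * Translate the gfn to a host pfn; if the page is not immediately
 * available, try to turn the fault into an asynchronous page fault
 * instead of blocking the vCPU.  Returns true if the fault should be
 * retried later.
 */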
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
			 gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
			 bool *writable)
{
	struct kvm_memory_slot *slot;
	bool async;

	/*
	 * Don't expose private memslots to L2.
	 */
	if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		*pfn = KVM_PFN_NOSLOT;
		return false;
	}

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	async = false;
	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
	if (!async)
		return false; /* *pfn has correct page already */

	if (!prefault && kvm_can_do_async_pf(vcpu)) {
		trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
			trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
			return true;
		} else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
			return true;
	}

	*pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
	return false;
}

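/*
 * Common page-fault handler for direct MMUs (nonpaging and TDP): try the
 * lockless fast path first, then resolve the pfn (possibly via an async
 * page fault) and install the mapping with __direct_map() under mmu_lock.
 */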
static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
			     bool prefault, int max_level, bool is_tdp)
{
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool lpage_disallowed = exec && is_nx_huge_page_enabled();
	bool map_writable;

	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	int r;

	if (page_fault_handle_page_track(vcpu, error_code, gfn))
		return RET_PF_EMULATE;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (lpage_disallowed)
		max_level = PT_PAGE_TABLE_LEVEL;

	if (fast_page_fault(vcpu, gpa, error_code))
		return RET_PF_RETRY;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
		return RET_PF_RETRY;

	if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
		return r;

	r = RET_PF_RETRY;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (make_mmu_pages_available(vcpu) < 0)
		goto out_unlock;
	r = __direct_map(vcpu, gpa, write, map_writable, max_level, pfn,
			 prefault, is_tdp && lpage_disallowed);

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return r;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
				u32 error_code, bool prefault)
{
	pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);

	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
	return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
				 PT_DIRECTORY_LEVEL, false);
}

int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len)
{
	int r = 1;

#ifndef CONFIG_X86_64
	/* A 64-bit CR2 should be impossible on 32-bit KVM. */
	if (WARN_ON_ONCE(fault_address >> 32))
		return -EFAULT;
#endif

	vcpu->arch.l1tf_flush_l1d = true;
	switch (vcpu->arch.apf.host_apf_reason) {
	default:
		trace_kvm_page_fault(fault_address, error_code);

		if (kvm_event_needs_reinjection(vcpu))
			kvm_mmu_unprotect_page_virt(vcpu, fault_address);
		r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
				insn_len);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		vcpu->arch.apf.host_apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wait(fault_address, 0);
		local_irq_enable();
		break;
	case KVM_PV_REASON_PAGE_READY:
		vcpu->arch.apf.host_apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wake(fault_address);
		local_irq_enable();
		break;
	}
	return r;
}
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);

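/*
 * TDP page fault: shrink max_level until the whole candidate huge-page
 * range has consistent MTRR memory types, then fall through to the
 * common direct fault path.
 */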
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		       bool prefault)
{
	int max_level;

	for (max_level = PT_MAX_HUGEPAGE_LEVEL;
	     max_level > PT_PAGE_TABLE_LEVEL;
	     max_level--) {
		int page_num = KVM_PAGES_PER_HPAGE(max_level);
		gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);

		if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
			break;
	}

	return direct_page_fault(vcpu, gpa, error_code, prefault,
				 max_level, true);
}

static void nonpaging_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
{
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->update_pte = nonpaging_update_pte;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->direct_map = true;
	context->nx = false;
}

static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t cr3,
				  union kvm_mmu_page_role role)
{
	return (role.direct || cr3 == root->cr3) &&
	       VALID_PAGE(root->hpa) && page_header(root->hpa) &&
	       role.word == page_header(root->hpa)->role.word;
}

/*
 * Find out if a previously cached root matching the new CR3/role is available.
 * The current root is also inserted into the cache.
 * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
 * returned.
 * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
 * false is returned. This root should now be freed by the caller.
 */
static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
				  union kvm_mmu_page_role new_role)
{
	uint i;
	struct kvm_mmu_root_info root;
	struct kvm_mmu *mmu = vcpu->arch.mmu;

	root.cr3 = mmu->root_cr3;
	root.hpa = mmu->root_hpa;

	if (is_root_usable(&root, new_cr3, new_role))
		return true;

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		swap(root, mmu->prev_roots[i]);

		if (is_root_usable(&root, new_cr3, new_role))
			break;
	}

	mmu->root_hpa = root.hpa;
	mmu->root_cr3 = root.cr3;

	return i < KVM_MMU_NUM_PREV_ROOTS;
}

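/*
 * Try to reuse a cached root for the new CR3 (64-bit hosts/VMs only)
 * instead of freeing and rebuilding the current root.  Returns false if
 * the caller must free the active root and allocate a new one.
 */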
static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
			    union kvm_mmu_page_role new_role,
			    bool skip_tlb_flush)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;

	/*
	 * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
	 * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
	 * later if necessary.
	 */
	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
	    mmu->root_level >= PT64_ROOT_4LEVEL) {
		if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT))
			return false;

		if (cached_root_available(vcpu, new_cr3, new_role)) {
			/*
			 * It is possible that the cached previous root page is
			 * obsolete because of a change in the MMU generation
			 * number. However, changing the generation number is
			 * accompanied by KVM_REQ_MMU_RELOAD, which will free
			 * the root set here and allocate a new one.
			 */
			kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
			if (!skip_tlb_flush) {
				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
				kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
			}

			/*
			 * The last MMIO access's GVA and GPA are cached in the
			 * VCPU. When switching to a new CR3, that GVA->GPA
			 * mapping may no longer be valid. So clear any cached
			 * MMIO info even when we don't need to sync the shadow
			 * page tables.
			 */
			vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);

			__clear_sp_write_flooding_count(
				page_header(mmu->root_hpa));

			return true;
		}
	}

	return false;
}

static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
			      union kvm_mmu_page_role new_role,
			      bool skip_tlb_flush)
{
	if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
				   KVM_MMU_ROOT_CURRENT);
}

void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
{
	__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
			  skip_tlb_flush);
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);

static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr3(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      struct x86_exception *fault)
{
	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
}

static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			   unsigned int access, int *nr_present)
{
	if (unlikely(is_mmio_spte(*sptep))) {
		if (gfn != get_mmio_spte_gfn(*sptep)) {
			mmu_spte_clear_no_track(sptep);
			return true;
		}

		(*nr_present)++;
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}

static inline bool is_last_gpte(struct kvm_mmu *mmu,
				unsigned level, unsigned gpte)
{
	/*
	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
	 * If it is clear, there are no large pages at this level, so clear
	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
	 */
	gpte &= level - mmu->last_nonleaf_level;

	/*
	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
	 */
	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;

	return gpte & PT_PAGE_SIZE_MASK;
}

#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static void
__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
			struct rsvd_bits_validate *rsvd_check,
			int maxphyaddr, int level, bool nx, bool gbpages,
			bool pse, bool amd)
{
	u64 exb_bit_rsvd = 0;
	u64 gbpages_bit_rsvd = 0;
	u64 nonleaf_bit8_rsvd = 0;

	rsvd_check->bad_mt_xwr = 0;

	if (!nx)
		exb_bit_rsvd = rsvd_bits(63, 63);
	if (!gbpages)
		gbpages_bit_rsvd = rsvd_bits(7, 7);

	/*
	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
	 * leaf entries) on AMD CPUs only.
	 */
	if (amd)
		nonleaf_bit8_rsvd = rsvd_bits(8, 8);

	switch (level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
		rsvd_check->rsvd_bits_mask[0][1] = 0;
		rsvd_check->rsvd_bits_mask[0][0] = 0;
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];

		if (!pse) {
			rsvd_check->rsvd_bits_mask[1][1] = 0;
			break;
		}

		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32 bits PSE 4MB page */
			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		break;
	case PT32E_ROOT_LEVEL:
		rsvd_check->rsvd_bits_mask[0][2] =
			rsvd_bits(maxphyaddr, 63) |
			rsvd_bits(5, 8) | rsvd_bits(1, 2);	/* PDPTE */
		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PDE */
		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PTE */
		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
		break;
	case PT64_ROOT_5LEVEL:
		rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[1][4] =
			rsvd_check->rsvd_bits_mask[0][4];
		/* fall through */
	case PT64_ROOT_4LEVEL:
		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			nonleaf_bit8_rsvd | gbpages_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		rsvd_check->rsvd_bits_mask[1][3] =
			rsvd_check->rsvd_bits_mask[0][3];
		rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
			gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 29);
		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
		rsvd_check->rsvd_bits_mask[1][0] =
			rsvd_check->rsvd_bits_mask[0][0];
		break;
	}
}

static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
				cpuid_maxphyaddr(vcpu), context->root_level,
				context->nx,
				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
}

static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
			    int maxphyaddr, bool execonly)
{
	u64 bad_mt_xwr;

	rsvd_check->rsvd_bits_mask[0][4] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
	rsvd_check->rsvd_bits_mask[0][3] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
	rsvd_check->rsvd_bits_mask[0][2] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
	rsvd_check->rsvd_bits_mask[0][1] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
	rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);

	/* large page */
	rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
	rsvd_check->rsvd_bits_mask[1][2] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
	rsvd_check->rsvd_bits_mask[1][1] =
		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];

	bad_mt_xwr = 0xFFull << (2 * 8);	/* bits 3..5 must not be 2 */
	bad_mt_xwr |= 0xFFull << (3 * 8);	/* bits 3..5 must not be 3 */
	bad_mt_xwr |= 0xFFull << (7 * 8);	/* bits 3..5 must not be 7 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 2);	/* bits 0..2 must not be 010 */
	bad_mt_xwr |= REPEAT_BYTE(1ull << 6);	/* bits 0..2 must not be 110 */
	if (!execonly) {
		/* bits 0..2 must not be 100 unless VMX capabilities allow it */
		bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
	}
	rsvd_check->bad_mt_xwr = bad_mt_xwr;
}

static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
		struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
				    cpuid_maxphyaddr(vcpu), execonly);
}

/*
 * The page table on the host is the shadow page table for the page
 * table in the guest or an AMD nested guest; its MMU features
 * completely follow the features in the guest.
 */
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
	bool uses_nx = context->nx ||
		context->mmu_role.base.smep_andnot_wp;
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	/*
	 * Passing "true" to the last argument is okay; it adds a check
	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
	 */
	shadow_zero_check = &context->shadow_zero_check;
	__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
				shadow_phys_bits,
				context->shadow_root_level, uses_nx,
				guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
				is_pse(vcpu), true);

	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}

}
EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);

static inline bool boot_cpu_is_amd(void)
{
	WARN_ON_ONCE(!tdp_enabled);
	return shadow_x_mask == 0;
}

/*
 * The direct page table on the host uses as many MMU features as
 * possible; however, KVM currently does not do execution-protection.
 */
static void
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context)
{
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	shadow_zero_check = &context->shadow_zero_check;

	if (boot_cpu_is_amd())
		__reset_rsvds_bits_mask(vcpu, shadow_zero_check,
					shadow_phys_bits,
					context->shadow_root_level, false,
					boot_cpu_has(X86_FEATURE_GBPAGES),
					true, true);
	else
		__reset_rsvds_bits_mask_ept(shadow_zero_check,
					    shadow_phys_bits,
					    false);

	if (!shadow_me_mask)
		return;

	for (i = context->shadow_root_level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}
}

/*
 * Same as the comments in reset_shadow_zero_bits_mask(), except this
 * is the shadow page table for an Intel nested guest.
 */
static void
reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
				struct kvm_mmu *context, bool execonly)
{
	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
				    shadow_phys_bits, execonly);
}

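/*
 * The byte index enumerates the eight possible combinations of a pte's
 * X/W/U permission bits; the corresponding bit is set iff that
 * combination includes the given ACC_* permission.  Used to build the
 * per-PFEC permission masks below.
 */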
#define BYTE_MASK(access) \
	((1 & (access) ? 2 : 0) | \
	 (2 & (access) ? 4 : 0) | \
	 (3 & (access) ? 8 : 0) | \
	 (4 & (access) ? 16 : 0) | \
	 (5 & (access) ? 32 : 0) | \
	 (6 & (access) ? 64 : 0) | \
	 (7 & (access) ? 128 : 0))


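/*
 * Precompute, for every page-fault error-code combination, which pte
 * permission combinations would fault; permission_fault() consults this
 * bitmap instead of re-deriving the checks on every fault.
 */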
static void update_permission_bitmask(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu, bool ept)
{
	unsigned byte;

	const u8 x = BYTE_MASK(ACC_EXEC_MASK);
	const u8 w = BYTE_MASK(ACC_WRITE_MASK);
	const u8 u = BYTE_MASK(ACC_USER_MASK);

	bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
	bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
	bool cr0_wp = is_write_protection(vcpu);

	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
		unsigned pfec = byte << 1;

		/*
		 * Each "*f" variable has a 1 bit for each UWX value
		 * that causes a fault with the given PFEC.
		 */

		/* Faults from writes to non-writable pages */
		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
		/* Faults from user mode accesses to supervisor pages */
		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
		/* Faults from fetches of non-executable pages */
		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
		/* Faults from kernel mode fetches of user pages */
		u8 smepf = 0;
		/* Faults from kernel mode accesses of user pages */
		u8 smapf = 0;

		if (!ept) {
			/* Faults from kernel mode accesses to user pages */
			u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;

			/* Not really needed: !nx will cause pte.nx to fault */
			if (!mmu->nx)
				ff = 0;

			/* Allow supervisor writes if !cr0.wp */
			if (!cr0_wp)
				wf = (pfec & PFERR_USER_MASK) ? wf : 0;

			/* Disallow supervisor fetches of user code if cr4.smep */
			if (cr4_smep)
				smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;

			/*
			 * SMAP:kernel-mode data accesses from user-mode
			 * mappings should fault. A fault is considered
			 * as a SMAP violation if all of the following
			 * conditions are true:
			 *   - X86_CR4_SMAP is set in CR4
			 *   - A user page is accessed
			 *   - The access is not a fetch
			 *   - Page fault in kernel mode
			 *   - if CPL = 3 or X86_EFLAGS_AC is clear
			 *
			 * Here, we cover the first three conditions.
			 * The fourth is computed dynamically in permission_fault();
			 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
			 * *not* subject to SMAP restrictions.
			 */
			if (cr4_smap)
				smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
		}

		mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
	}
}

/*
* PKU is an additional mechanism by which the paging controls access to
* user-mode addresses based on the value in the PKRU register.  Protection
* key violations are reported through a bit in the page fault error code.
* Unlike other bits of the error code, the PK bit is not known at the
* call site of e.g. gva_to_gpa; it must be computed directly in
* permission_fault based on two bits of PKRU, on some machine state (CR4,
* CR0, EFER, CPL), and on other bits of the error code and the page tables.
*
* In particular the following conditions come from the error code, the
* page tables and the machine state:
* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
* - PK is always zero if U=0 in the page tables
* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
*
* The PKRU bitmask caches the result of these four conditions.  The error
* code (minus the P bit) and the page table's U bit form an index into the
* PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
* with the two bits of the PKRU register corresponding to the protection key.
* For the first three conditions above the bits will be 00, thus masking
* away both AD and WD.  For all reads or if the last condition holds, WD
* only will be masked away.
*/
static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				bool ept)
{
	unsigned bit;
	bool wp;

	if (ept) {
		mmu->pkru_mask = 0;
		return;
	}

	/* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
		mmu->pkru_mask = 0;
		return;
	}

	wp = is_write_protection(vcpu);

	for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
		unsigned pfec, pkey_bits;
		bool check_pkey, check_write, ff, uf, wf, pte_user;

		pfec = bit << 1;
		ff = pfec & PFERR_FETCH_MASK;
		uf = pfec & PFERR_USER_MASK;
		wf = pfec & PFERR_WRITE_MASK;

		/* PFEC.RSVD is replaced by ACC_USER_MASK. */
		pte_user = pfec & PFERR_RSVD_MASK;

		/*
		 * Only need to check the access which is not an
		 * instruction fetch and is to a user page.
		 */
		check_pkey = (!ff && pte_user);
		/*
		 * write access is controlled by PKRU if it is a
		 * user access or CR0.WP = 1.
		 */
		check_write = check_pkey && wf && (uf || wp);

		/* PKRU.AD stops both read and write access. */
		pkey_bits = !!check_pkey;
		/* PKRU.WD stops write access. */
		pkey_bits |= (!!check_write) << 1;

		mmu->pkru_mask |= (pkey_bits & 3) << pfec;
	}
}

static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	unsigned root_level = mmu->root_level;

	mmu->last_nonleaf_level = root_level;
	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
		mmu->last_nonleaf_level++;
}

static void paging64_init_context_common(struct kvm_vcpu *vcpu,
					 struct kvm_mmu *context,
					 int level)
{
	context->nx = is_nx(vcpu);
	context->root_level = level;

	reset_rsvds_bits_mask(vcpu, context);
	update_permission_bitmask(vcpu, context, false);
	update_pkru_bitmask(vcpu, context, false);
	update_last_nonleaf_level(vcpu, context);

	MMU_WARN_ON(!is_pae(vcpu));
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->update_pte = paging64_update_pte;
	context->shadow_root_level = level;
	context->direct_map = false;
}

static void paging64_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	int root_level = is_la57_mode(vcpu) ?
			 PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;

	paging64_init_context_common(vcpu, context, root_level);
}

static void paging32_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	context->nx = false;
	context->root_level = PT32_ROOT_LEVEL;

	reset_rsvds_bits_mask(vcpu, context);
	update_permission_bitmask(vcpu, context, false);
	update_pkru_bitmask(vcpu, context, false);
	update_last_nonleaf_level(vcpu, context);

	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->update_pte = paging32_update_pte;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->direct_map = false;
}

static void paging32E_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
{
	paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}

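/*
 * Collect the extended (non-base) pieces of vCPU state that affect how
 * guest PTEs are interpreted; they are folded into the mmu_role so that
 * a change in any of them forces a different shadow root.
 */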
static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_extended_role ext = {0};

	ext.cr0_pg = !!is_paging(vcpu);
	ext.cr4_pae = !!is_pae(vcpu);
	ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
	ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
	ext.cr4_pse = !!is_pse(vcpu);
	ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
	ext.maxphyaddr = cpuid_maxphyaddr(vcpu);

	ext.valid = 1;

	return ext;
}

static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
						   bool base_only)
{
	union kvm_mmu_role role = {0};

	role.base.access = ACC_ALL;
	role.base.nxe = !!is_nx(vcpu);
	role.base.cr0_wp = is_write_protection(vcpu);
	role.base.smm = is_smm(vcpu);
	role.base.guest_mode = is_guest_mode(vcpu);

	if (base_only)
		return role;

	role.ext = kvm_calc_mmu_role_ext(vcpu);

	return role;
}

static union kvm_mmu_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
{
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);

	role.base.ad_disabled = (shadow_accessed_mask == 0);
	role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
	role.base.direct = true;
	role.base.gpte_is_8_bytes = true;

	return role;
}

static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = vcpu->arch.mmu;
	union kvm_mmu_role new_role =
		kvm_calc_tdp_mmu_root_page_role(vcpu, false);

	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->mmu_role.as_u64 = new_role.as_u64;
	context->page_fault = kvm_tdp_page_fault;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->update_pte = nonpaging_update_pte;
	context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
	context->direct_map = true;
	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
	context->get_guest_pgd = get_cr3;
	context->get_pdptr = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;

	if (!is_paging(vcpu)) {
		context->nx = false;
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->nx = is_nx(vcpu);
		context->root_level = is_la57_mode(vcpu) ?
				PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
	} else if (is_pae(vcpu)) {
		context->nx = is_nx(vcpu);
		context->root_level = PT32E_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
	} else {
		context->nx = false;
		context->root_level = PT32_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging32_gva_to_gpa;
	}

	update_permission_bitmask(vcpu, context, false);
	update_pkru_bitmask(vcpu, context, false);
	update_last_nonleaf_level(vcpu, context);
	reset_tdp_shadow_zero_bits_mask(vcpu, context);
}

static union kvm_mmu_role
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
{
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);

	role.base.smep_andnot_wp = role.ext.cr4_smep &&
		!is_write_protection(vcpu);
	role.base.smap_andnot_wp = role.ext.cr4_smap &&
		!is_write_protection(vcpu);
	role.base.direct = !is_paging(vcpu);
	role.base.gpte_is_8_bytes = !!is_pae(vcpu);

	if (!is_long_mode(vcpu))
		role.base.level = PT32E_ROOT_LEVEL;
	else if (is_la57_mode(vcpu))
		role.base.level = PT64_ROOT_5LEVEL;
	else
		role.base.level = PT64_ROOT_4LEVEL;

	return role;
}

void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = vcpu->arch.mmu;
	union kvm_mmu_role new_role =
		kvm_calc_shadow_mmu_root_page_role(vcpu, false);

	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	if (!is_paging(vcpu))
		nonpaging_init_context(vcpu, context);
	else if (is_long_mode(vcpu))
		paging64_init_context(vcpu, context);
	else if (is_pae(vcpu))
		paging32E_init_context(vcpu, context);
	else
		paging32_init_context(vcpu, context);

	context->mmu_role.as_u64 = new_role.as_u64;
	reset_shadow_zero_bits_mask(vcpu, context);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
				   bool execonly, u8 level)
{
	union kvm_mmu_role role = {0};

	/* SMM flag is inherited from root_mmu */
	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;

	role.base.level = level;
	role.base.gpte_is_8_bytes = true;
	role.base.direct = false;
	role.base.ad_disabled = !accessed_dirty;
	role.base.guest_mode = true;
	role.base.access = ACC_ALL;

	/*
	 * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
	 * SMAP variation to denote shadow EPT entries.
	 */
	role.base.cr0_wp = true;
	role.base.smap_andnot_wp = true;

	role.ext = kvm_calc_mmu_role_ext(vcpu);
	role.ext.execonly = execonly;

	return role;
}

void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp)
{
	struct kvm_mmu *context = vcpu->arch.mmu;
	u8 level = vmx_eptp_page_walk_level(new_eptp);
	union kvm_mmu_role new_role =
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
						   execonly, level);

	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);

	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->shadow_root_level = level;

	context->nx = true;
	context->ept_ad = accessed_dirty;
	context->page_fault = ept_page_fault;
	context->gva_to_gpa = ept_gva_to_gpa;
	context->sync_page = ept_sync_page;
	context->invlpg = ept_invlpg;
	context->update_pte = ept_update_pte;
	context->root_level = level;
	context->direct_map = false;
	context->mmu_role.as_u64 = new_role.as_u64;

	update_permission_bitmask(vcpu, context, true);
	update_pkru_bitmask(vcpu, context, true);
	update_last_nonleaf_level(vcpu, context);
	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = vcpu->arch.mmu;

	kvm_init_shadow_mmu(vcpu);
	context->set_cr3           = kvm_x86_ops->set_cr3;
	context->get_guest_pgd     = get_cr3;
	context->get_pdptr         = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
}

static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

	if (new_role.as_u64 == g_context->mmu_role.as_u64)
		return;

	g_context->mmu_role.as_u64 = new_role.as_u64;
	g_context->get_guest_pgd     = get_cr3;
	g_context->get_pdptr         = kvm_pdptr_read;
	g_context->inject_page_fault = kvm_inject_page_fault;

	/*
	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
	 * L1's nested page tables (e.g. EPT12). The nested translation
	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
	 * L2's page tables as the first level of translation and L1's
	 * nested page tables as the second level of translation. Basically
	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
	 */
	if (!is_paging(vcpu)) {
		g_context->nx = false;
		g_context->root_level = 0;
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
	} else if (is_long_mode(vcpu)) {
		g_context->nx = is_nx(vcpu);
		g_context->root_level = is_la57_mode(vcpu) ?
					PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else if (is_pae(vcpu)) {
		g_context->nx = is_nx(vcpu);
		g_context->root_level = PT32E_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else {
		g_context->nx = false;
		g_context->root_level = PT32_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
	}

	update_permission_bitmask(vcpu, g_context, false);
	update_pkru_bitmask(vcpu, g_context, false);
	update_last_nonleaf_level(vcpu, g_context);
}

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
{
	if (reset_roots) {
		uint i;

		vcpu->arch.mmu->root_hpa = INVALID_PAGE;

		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
	}

	if (mmu_is_nested(vcpu))
		init_kvm_nested_mmu(vcpu);
	else if (tdp_enabled)
		init_kvm_tdp_mmu(vcpu);
	else
		init_kvm_softmmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_mmu);

static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role role;

	if (tdp_enabled)
		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
	else
		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);

	return role.base;
}

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	kvm_init_mmu(vcpu, true);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	r = mmu_alloc_roots(vcpu);
	kvm_mmu_sync_roots(vcpu);
	if (r)
		goto out;
	kvm_mmu_load_cr3(vcpu);
	kvm_x86_ops->tlb_flush(vcpu, true);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
}

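/*
 * A remote TLB flush is needed only if a previously present SPTE was
 * removed, pointed at a different frame, or stripped of permissions.
 */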
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= shadow_nx_mask;
	new ^= shadow_nx_mask;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

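/*
 * Fetch the guest PTE being written; a 4-byte write under PAE is widened
 * to the containing 8-byte gpte so the whole entry is re-read atomically.
 */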
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    int *bytes)
{
	u64 gentry = 0;
	int r;

	/*
	 * Assume that the pte write on a page table of the same type
	 * as the current vcpu paging mode since we update the sptes only
	 * when they have the same mode.
	 */
	if (is_pae(vcpu) && *bytes == 4) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
	}

	if (*bytes == 4 || *bytes == 8) {
		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
		if (r)
			gentry = 0;
	}

	return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{
	/*
	 * Skip write-flooding detected for the sp whose level is 1, because
	 * it can become unsync, then the guest page is not write-protected.
	 */
	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
		return false;

	atomic_inc(&sp->write_flooding_count);
	return atomic_read(&sp->write_flooding_count) >= 3;
}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{
	unsigned offset, pte_size, misaligned;

	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
		 gpa, bytes, sp->role.word);

	offset = offset_in_page(gpa);
	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;

	/*
	 * Sometimes the OS only writes the last byte to update status bits;
	 * for example, Linux uses an andb instruction in clear_bit().
	 */
	if (!(offset & (pte_size - 1)) && bytes == 1)
		return false;

	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
	misaligned |= bytes < 4;

	return misaligned;
}

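/*
 * Locate the shadow PTE(s) in @sp that map the written @gpa, or return NULL
 * if the write falls in a quadrant this sp does not cover.  A 32-bit guest
 * pde may be shadowed by two sptes, in which case *nspte is 2.
 */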
static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
	unsigned page_offset, quadrant;
	u64 *spte;
	int level;

	page_offset = offset_in_page(gpa);
	level = sp->role.level;
	*nspte = 1;
	if (!sp->role.gpte_is_8_bytes) {
		page_offset <<= 1;	/* 32->64 */
		/*
		 * A 32-bit pde maps 4MB while the shadow pdes map
		 * only 2MB.  So we need to double the offset again
		 * and zap two pdes instead of one.
		 */
		if (level == PT32_ROOT_LEVEL) {
			page_offset &= ~7; /* kill rounding error */
			page_offset <<= 1;
			*nspte = 2;
		}
		quadrant = page_offset >> PAGE_SHIFT;
		page_offset &= ~PAGE_MASK;
		if (quadrant != sp->role.quadrant)
			return NULL;
	}

	spte = &sp->spt[page_offset / sizeof(*spte)];
	return spte;
}

/*
 * Ignore various flags when determining if a SPTE can be immediately
 * overwritten for the current MMU.
 *  - level: explicitly checked in mmu_pte_write_new_pte(), and will never
 *    match the current MMU role, as MMU's level tracks the root level.
 *  - access: updated based on the new guest PTE
 *  - quadrant: handled by get_written_sptes()
 *  - invalid: always false (loop only walks valid shadow pages)
 */
static const union kvm_mmu_page_role role_ign = {
	.level = 0xf,
	.access = 0x7,
	.quadrant = 0x3,
	.invalid = 0x1,
};

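/*
 * Write-track notifier: the guest wrote to a gfn that is shadowed as a page
 * table.  Zap shadow pages that look misaligned or write-flooded, otherwise
 * update the touched sptes in place from the new guest PTE.
 */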
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
			      const u8 *new, int bytes,
			      struct kvm_page_track_notifier_node *node)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	int npte;
	bool remote_flush, local_flush;

	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can exit simply.
	 */
	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
		return;

	remote_flush = local_flush = false;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	/*
	 * No need to care whether the memory allocation is successful
	 * or not, since pte prefetch is skipped if there are not
	 * enough objects in the cache.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);

	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);

	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
		if (detect_write_misaligned(sp, gpa, bytes) ||
		      detect_write_flooding(sp)) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}

		spte = get_written_sptes(sp, gpa, &npte);
		if (!spte)
			continue;

		local_flush = true;
		while (npte--) {
			u32 base_role = vcpu->arch.mmu->mmu_role.base.word;

			entry = *spte;
			mmu_page_zap_pte(vcpu->kvm, sp, spte);
			if (gentry &&
			    !((sp->role.word ^ base_role) & ~role_ign.word) &&
			    rmap_can_add(vcpu))
				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
			if (need_remote_flush(entry, *spte))
				remote_flush = true;
			++spte;
		}
	}
	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
	spin_unlock(&vcpu->kvm->mmu_lock);
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (vcpu->arch.mmu->direct_map)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

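/*
 * Top-level page fault handler.  Returns 1 when the fault was fixed and the
 * guest can resume, a negative value on error, and otherwise falls back to
 * instruction emulation.
 */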
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len)
{
	int r, emulation_type = EMULTYPE_PF;
	bool direct = vcpu->arch.mmu->direct_map;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	r = RET_PF_INVALID;
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
		if (r == RET_PF_EMULATE)
			goto emulate;
	}

	if (r == RET_PF_INVALID) {
		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
					  lower_32_bits(error_code), false);
		WARN_ON(r == RET_PF_INVALID);
	}

	if (r == RET_PF_RETRY)
		return 1;
	if (r < 0)
		return r;

	/*
	 * Before emulating the instruction, check if the error code
	 * was due to a RO violation while translating the guest page.
	 * This can occur when using nested virtualization with nested
	 * paging in both guests. If true, we simply unprotect the page
	 * and resume the guest.
	 */
	if (vcpu->arch.mmu->direct_map &&
	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
		return 1;
	}

	/*
	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
	 * optimistically try to just unprotect the page and let the processor
	 * re-execute the instruction that caused the page fault.  Do not allow
	 * retrying MMIO emulation, as it's not only pointless but could also
	 * cause us to enter an infinite loop because the processor will keep
	 * faulting on the non-existent MMIO address.  Retrying an instruction
	 * from a nested guest is also pointless and dangerous as we are only
	 * explicitly shadowing L1's page tables, i.e. unprotecting something
	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
	 */
	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
emulate:
	/*
	 * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
	 * This can happen if a guest gets a page-fault on data access but the HW
	 * table walker is not able to read the instruction page (e.g instruction
	 * page is not present in memory). In those cases we simply restart the
	 * guest, with the exception of AMD Erratum 1096 which is unrecoverable.
	 */
	if (unlikely(insn && !insn_len)) {
		if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
			return 1;
	}

	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
				       insn_len);
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	int i;

	/* INVLPG on a non-canonical address is a NOP according to the SDM.  */
	if (is_noncanonical_address(gva, vcpu))
		return;

	mmu->invlpg(vcpu, gva, mmu->root_hpa);

	/*
	 * INVLPG is required to invalidate any global mappings for the VA,
	 * irrespective of PCID. Since it would take us roughly the same amount
	 * of work to determine whether any of the prev_root mappings of the VA
	 * is marked global as to just sync it blindly, we might as well
	 * just always sync it.
	 *
	 * Mappings not reachable via the current cr3 or the prev_roots will be
	 * synced when switching to that cr3, so nothing needs to be done here
	 * for them.
	 */
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (VALID_PAGE(mmu->prev_roots[i].hpa))
			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);

	kvm_x86_ops->tlb_flush_gva(vcpu, gva);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

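/*
 * Invalidate a single gva for a given PCID, as needed for INVPCID
 * emulation: hit every root (current or cached previous) that is tagged
 * with that PCID, then flush the hardware TLB entry if any root matched.
 */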
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	bool tlb_flush = false;
	uint i;

	if (pcid == kvm_get_active_pcid(vcpu)) {
		mmu->invlpg(vcpu, gva, mmu->root_hpa);
		tlb_flush = true;
	}

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) {
			mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
			tlb_flush = true;
		}
	}

	if (tlb_flush)
		kvm_x86_ops->tlb_flush_gva(vcpu, gva);

	++vcpu->stat.invlpg;

	/*
	 * Mappings not reachable via the current cr3 or the prev_roots will be
	 * synced when switching to that cr3, so nothing needs to be done here
	 * for them.
	 */
}
EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);

void kvm_configure_mmu(bool enable_tdp)
{
	tdp_enabled = enable_tdp;
}
EXPORT_SYMBOL_GPL(kvm_configure_mmu);

/* The return value indicates if tlb flush on all vcpus is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);

/* The caller should hold mmu-lock before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, int start_level, int end_level,
			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
{
	struct slot_rmap_walk_iterator iterator;
	bool flush = false;

	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
			end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap);

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			if (flush && lock_flush_tlb) {
				kvm_flush_remote_tlbs_with_address(kvm,
						start_gfn,
						iterator.gfn - start_gfn + 1);
				flush = false;
			}
			cond_resched_lock(&kvm->mmu_lock);
		}
	}

	if (flush && lock_flush_tlb) {
		kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
						   end_gfn - start_gfn + 1);
		flush = false;
	}

	return flush;
}

static __always_inline bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool lock_flush_tlb)
{
	return slot_handle_level_range(kvm, memslot, fn, start_level,
			end_level, memslot->base_gfn,
			memslot->base_gfn + memslot->npages - 1,
			lock_flush_tlb);
}

static __always_inline bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		      slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
				 PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
		 slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
				 PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
}

static void free_mmu_pages(struct kvm_mmu *mmu)
{
	free_page((unsigned long)mmu->pae_root);
	free_page((unsigned long)mmu->lm_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	struct page *page;
	int i;

	/*
	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
	 * while the PDP table is a per-vCPU construct that's allocated at MMU
	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
	 * x86_64.  Therefore we need to allocate the PDP table in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.  Except for
	 * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
	 * skip allocating the PDP table.
	 */
	if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
		return 0;

	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	mmu->pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		mmu->pae_root[i] = INVALID_PAGE;

	return 0;
}

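/*
 * One-time per-vCPU MMU initialization: point the active and walk MMUs at
 * root_mmu, invalidate all cached roots, and allocate the PAE root tables
 * for the root and guest MMUs where the paging mode requires them.
 */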
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	uint i;
	int ret;

	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;

	vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
	vcpu->arch.root_mmu.root_cr3 = 0;
	vcpu->arch.root_mmu.translate_gpa = translate_gpa;
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;

	vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
	vcpu->arch.guest_mmu.root_cr3 = 0;
	vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;

	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;

	ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu);
	if (ret)
		return ret;

	ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu);
	if (ret)
		goto fail_allocate_root;

	return ret;
 fail_allocate_root:
	free_mmu_pages(&vcpu->arch.guest_mmu);
	return ret;
}

#define BATCH_ZAP_PAGES	10
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	int nr_zapped, batch = 0;

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		/*
		 * No obsolete valid page exists before a newly created page
		 * since active_mmu_pages is a FIFO list.
		 */
		if (!is_obsolete_sp(kvm, sp))
			break;

		/*
		 * Skip invalid pages with a non-zero root count, zapping pages
		 * with a non-zero root count will never succeed, i.e. the page
		 * will get thrown back on active_mmu_pages and we'll get stuck
		 * in an infinite loop.
		 */
		if (sp->role.invalid && sp->root_count)
			continue;

		/*
		 * No need to flush the TLB since we're only zapping shadow
		 * pages with an obsolete generation number and all vCPUS have
		 * loaded a new root, i.e. the shadow pages being zapped cannot
		 * be in active use by the guest.
		 */
		if (batch >= BATCH_ZAP_PAGES &&
		    cond_resched_lock(&kvm->mmu_lock)) {
			batch = 0;
			goto restart;
		}

		if (__kvm_mmu_prepare_zap_page(kvm, sp,
				&kvm->arch.zapped_obsolete_pages, &nr_zapped)) {
			batch += nr_zapped;
			goto restart;
		}
	}

	/*
	 * Trigger a remote TLB flush before freeing the page tables to ensure
	 * KVM is not in the middle of a lockless shadow page table walk, which
	 * may reference the pages.
	 */
	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
}

/*
 * Fast invalidate all shadow pages and use lock-break technique
 * to zap obsolete pages.
 *
 * It's required when memslot is being deleted or VM is being
 * destroyed, in these cases, we should ensure that KVM MMU does
 * not use any resource of the being-deleted slot or all slots
 * after calling the function.
 */
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	spin_lock(&kvm->mmu_lock);
	trace_kvm_mmu_zap_all_fast(kvm);

	/*
	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
	 * held for the entire duration of zapping obsolete pages, it's
	 * impossible for there to be multiple invalid generations associated
	 * with *valid* shadow pages at any given time, i.e. there is exactly
	 * one valid generation and (at most) one invalid generation.
	 */
	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;

	/*
	 * Notify all vcpus to reload its shadow page table and flush TLB.
	 * Then all vcpus will switch to new shadow page table with the new
	 * mmu_valid_gen.
	 *
	 * Note: we need to do this under the protection of mmu_lock,
	 * otherwise, vcpu would purge shadow page but miss tlb flush.
	 */
	kvm_reload_remote_mmus(kvm);

	kvm_zap_obsolete_pages(kvm);
	spin_unlock(&kvm->mmu_lock);
}

static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{
	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
{
	kvm_mmu_zap_all_fast(kvm);
}

void kvm_mmu_init_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	node->track_write = kvm_mmu_pte_write;
	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
	kvm_page_track_register_notifier(kvm, node);
}

void kvm_mmu_uninit_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	kvm_page_track_unregister_notifier(kvm, node);
}

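/*
 * Zap all rmap entries covering gfns in [gfn_start, gfn_end) for every
 * memslot in every address space, flushing remote TLBs as needed.
 */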
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			gfn_t start, end;

			start = max(gfn_start, memslot->base_gfn);
			end = min(gfn_end, memslot->base_gfn + memslot->npages);
			if (start >= end)
				continue;

			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
						PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
						start, end - 1, true);
		}
	}

	spin_unlock(&kvm->mmu_lock);
}

static bool slot_rmap_write_protect(struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head)
{
	return __rmap_write_protect(kvm, rmap_head, false);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      int start_level)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
				start_level, PT_MAX_HUGEPAGE_LEVEL, false);
	spin_unlock(&kvm->mmu_lock);

	/*
	 * We can flush all the TLBs out of the mmu lock without TLB
	 * corruption since we just change the spte from writable to
	 * readonly so that we only need to care the case of changing
	 * spte from present to present (changing the spte from present
	 * to nonpresent will flush all the TLBs immediately), in other
	 * words, the only case we care is mmu_spte_update() where we
	 * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
	 * instead of PT_WRITABLE_MASK, that means it does not depend
	 * on PT_WRITABLE_MASK anymore.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}

static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_tlb_flush = 0;
	kvm_pfn_t pfn;
	struct kvm_mmu_page *sp;

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		sp = page_header(__pa(sptep));
		pfn = spte_to_pfn(*sptep);

		/*
		 * We cannot do huge page mapping for indirect shadow pages,
		 * which are found on the last rmap (level = 1) when not using
		 * tdp; such shadow pages are synced with the page table in
		 * the guest, and the guest page table is using 4K page size
		 * mapping if the indirect sp has level = 1.
		 */
		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
		    (kvm_is_zone_device_pfn(pfn) ||
		     PageCompound(pfn_to_page(pfn)))) {
			pte_list_remove(rmap_head, sptep);

			if (kvm_available_flush_tlb_with_range())
				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
					KVM_PAGES_PER_HPAGE(sp->role.level));
			else
				need_tlb_flush = 1;

			goto restart;
		}
	}

	return need_tlb_flush;
}

void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{
	/* FIXME: const-ify all uses of struct kvm_memory_slot.  */
	spin_lock(&kvm->mmu_lock);
	slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
			 kvm_mmu_zap_collapsible_spte, true);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	/*
	 * All current use cases for flushing the TLBs for a specific memslot
	 * are related to dirty logging, and do the TLB flush out of mmu_lock.
	 * The interaction between the various operations on memslot must be
	 * serialized by slots_lock to ensure the TLB flush from one operation
	 * is observed by any other operation on the same memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
					   memslot->npages);
}

void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
	spin_unlock(&kvm->mmu_lock);

	/*
	 * It's also safe to flush TLBs out of mmu lock here as currently this
	 * function is only used for dirty logging, in which case flushing TLB
	 * out of mmu lock also guarantees no dirty pages will be lost in
	 * dirty_bitmap.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);

void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
					false);
	spin_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);

void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
	spin_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);

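/*
 * Zap every shadow page in the VM, periodically yielding mmu_lock; used
 * when all shadow state must be thrown away at once.
 */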
void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int ign;

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		if (sp->role.invalid && sp->root_count)
			continue;
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
			goto restart;
		if (cond_resched_lock(&kvm->mmu_lock))
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{
	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);

	gen &= MMIO_SPTE_GEN_MASK;

	/*
	 * Generation numbers are incremented in multiples of the number of
	 * address spaces in order to provide unique generations across all
	 * address spaces.  Strip what is effectively the address space
	 * modifier prior to checking for a wrap of the MMIO generation so
	 * that a wrap in any address space is detected.
	 */
	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);

	/*
	 * The very rare case: if the MMIO generation number has wrapped,
	 * zap all shadow pages.
	 */
	if (unlikely(gen == 0)) {
		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
		kvm_mmu_zap_all_fast(kvm);
	}
}

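/*
 * Memory shrinker callback: pick one VM off vm_list and release a little of
 * its MMU state, preferring already-zapped obsolete pages over zapping the
 * oldest active shadow page.
 */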
static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	int nr_to_scan = sc->nr_to_scan;
	unsigned long freed = 0;

	mutex_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		/*
		 * Never scan more than sc->nr_to_scan VM instances.
		 * Will not hit this condition practically since we do not try
		 * to shrink more than one VM and it is very unlikely to see
		 * !n_used_mmu_pages so many times.
		 */
		if (!nr_to_scan--)
			break;
		/*
		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
		 * here. We may skip a VM instance erroneously, but we do not
		 * want to shrink a VM that only started to populate its MMU
		 * anyway.
		 */
		if (!kvm->arch.n_used_mmu_pages &&
		    !kvm_has_zapped_obsolete_pages(kvm))
			continue;

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);

		if (kvm_has_zapped_obsolete_pages(kvm)) {
			kvm_mmu_commit_zap_page(kvm,
			      &kvm->arch.zapped_obsolete_pages);
			goto unlock;
		}

		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
			freed++;
		kvm_mmu_commit_zap_page(kvm, &invalid_list);

unlock:
		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		/*
		 * unfair on small ones
		 * per-vm shrinkers cry out
		 * sadness comes quickly
		 */
		list_move_tail(&kvm->vm_list, &vm_list);
		break;
	}

	mutex_unlock(&kvm_lock);
	return freed;
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
	.count_objects = mmu_shrink_count,
	.scan_objects = mmu_shrink_scan,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
	kmem_cache_destroy(pte_list_desc_cache);
	kmem_cache_destroy(mmu_page_header_cache);
}

static void kvm_set_mmio_spte_mask(void)
{
	u64 mask;

	/*
	 * Set the reserved bits and the present bit of a paging-structure
	 * entry to generate page fault with PFER.RSV = 1.
	 */

	/*
	 * Mask the uppermost physical address bit, which would be reserved as
	 * long as the supported physical address width is less than 52.
	 */
	mask = 1ull << 51;

	/* Set the present bit. */
	mask |= 1ull;

	/*
	 * If reserved bit is not supported, clear the present bit to disable
	 * mmio page fault.
	 */
	if (shadow_phys_bits == 52)
		mask &= ~1ull;

	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}

static bool get_nx_auto_mode(void)
{
	/* Return true when CPU has the bug, and mitigations are ON */
	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
}

static void __set_nx_huge_pages(bool val)
{
	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
}

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
{
	bool old_val = nx_huge_pages;
	bool new_val;

	/* In "auto" mode deploy workaround only if CPU has the bug. */
	if (sysfs_streq(val, "off"))
		new_val = 0;
	else if (sysfs_streq(val, "force"))
		new_val = 1;
	else if (sysfs_streq(val, "auto"))
		new_val = get_nx_auto_mode();
	else if (strtobool(val, &new_val) < 0)
		return -EINVAL;

	__set_nx_huge_pages(new_val);

	if (new_val != old_val) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list) {
			mutex_lock(&kvm->slots_lock);
			kvm_mmu_zap_all_fast(kvm);
			mutex_unlock(&kvm->slots_lock);

			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
		}
		mutex_unlock(&kvm_lock);
	}

	return 0;
}

int kvm_mmu_module_init(void)
{
	int ret = -ENOMEM;

	if (nx_huge_pages == -1)
		__set_nx_huge_pages(get_nx_auto_mode());

	/*
	 * MMU roles use union aliasing which is, generally speaking, an
	 * undefined behavior. However, we supposedly know how compilers behave
	 * and the current status quo is unlikely to change. Guardians below are
	 * supposed to let us know if the assumption becomes false.
	 */
	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));

	kvm_mmu_reset_all_pte_masks();

	kvm_set_mmio_spte_mask();

	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
					    sizeof(struct pte_list_desc),
					    0, SLAB_ACCOUNT, NULL);
	if (!pte_list_desc_cache)
		goto out;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, SLAB_ACCOUNT, NULL);
	if (!mmu_page_header_cache)
		goto out;

	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
		goto out;

	ret = register_shrinker(&mmu_shrinker);
	if (ret)
		goto out;

	return 0;

out:
	mmu_destroy_caches();
	return ret;
}

/*
 * Calculate mmu pages needed for kvm.
 */
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
{
	unsigned long nr_mmu_pages;
	unsigned long nr_pages = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);

		kvm_for_each_memslot(memslot, slots)
			nr_pages += memslot->npages;
	}

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	free_mmu_pages(&vcpu->arch.root_mmu);
	free_mmu_pages(&vcpu->arch.guest_mmu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
	mmu_audit_disable();
}

static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
{
	unsigned int old_val;
	int err;

	old_val = nx_huge_pages_recovery_ratio;
	err = param_set_uint(val, kp);
	if (err)
		return err;

	if (READ_ONCE(nx_huge_pages) &&
	    !old_val && nx_huge_pages_recovery_ratio) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list)
			wake_up_process(kvm->arch.nx_lpage_recovery_thread);

		mutex_unlock(&kvm_lock);
	}

	return err;
}

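/*
 * Recover NX huge pages: zap a ratio-controlled portion of the shadow pages
 * that were created only because a huge page had to be split for the NX
 * workaround, allowing those ranges to be mapped huge again.
 */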
static void kvm_recover_nx_lpages(struct kvm *kvm)
{
	int rcu_idx;
	struct kvm_mmu_page *sp;
	unsigned int ratio;
	LIST_HEAD(invalid_list);
	ulong to_zap;

	rcu_idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
	while (to_zap && !list_empty(&kvm->arch.lpage_disallowed_mmu_pages)) {
		/*
		 * We use a separate list instead of just using active_mmu_pages
		 * because the number of lpage_disallowed pages is expected to
		 * be relatively small compared to the total.
		 */
		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
				      struct kvm_mmu_page,
				      lpage_disallowed_link);
		WARN_ON_ONCE(!sp->lpage_disallowed);
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
		WARN_ON_ONCE(sp->lpage_disallowed);

		if (!--to_zap || need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			kvm_mmu_commit_zap_page(kvm, &invalid_list);
			if (to_zap)
				cond_resched_lock(&kvm->mmu_lock);
		}
	}

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, rcu_idx);
}

static long get_nx_lpage_recovery_timeout(u64 start_time)
{
	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
		? start_time + 60 * HZ - get_jiffies_64()
		: MAX_SCHEDULE_TIMEOUT;
}

static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
{
	u64 start_time;
	long remaining_time;

	while (true) {
		start_time = get_jiffies_64();
		remaining_time = get_nx_lpage_recovery_timeout(start_time);

		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop() && remaining_time > 0) {
			schedule_timeout(remaining_time);
			remaining_time = get_nx_lpage_recovery_timeout(start_time);
			set_current_state(TASK_INTERRUPTIBLE);
		}

		set_current_state(TASK_RUNNING);

		if (kthread_should_stop())
			return 0;

		kvm_recover_nx_lpages(kvm);
	}
}

int kvm_mmu_post_init_vm(struct kvm *kvm)
{
	int err;

	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
					  "kvm-nx-lpage-recovery",
					  &kvm->arch.nx_lpage_recovery_thread);
	if (!err)
		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);

	return err;
}

void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.nx_lpage_recovery_thread)
		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
}