// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <trace/events/kvm.h>

#ifdef CONFIG_X86_64
static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
#endif

static bool is_tdp_mmu_enabled(void)
{
#ifdef CONFIG_X86_64
	return tdp_enabled && READ_ONCE(tdp_mmu_enabled);
#else
	return false;
#endif /* CONFIG_X86_64 */
}

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!is_tdp_mmu_enabled())
		return;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}

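/* Drop a reference to @root, freeing it if the last reference was put. */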
static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	if (kvm_mmu_put_root(kvm, root))
		kvm_tdp_mmu_free_root(kvm, root);
}

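/*
 * Returns false if @root is the list head (i.e. the walk is complete),
 * otherwise takes a reference on @root and returns true.
 */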
static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
					   struct kvm_mmu_page *root)
{
	lockdep_assert_held(&kvm->mmu_lock);

	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
		return false;

	kvm_mmu_get_root(kvm, root);
	return true;
}

static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	struct kvm_mmu_page *next_root;

	next_root = list_next_entry(root, link);
	tdp_mmu_put_root(kvm, root);
	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)				\
	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
				      typeof(*_root), link);		\
	     tdp_mmu_next_root_valid(_kvm, _root);			\
	     _root = tdp_mmu_next_root(_kvm, _root))

#define for_each_tdp_mmu_root(_kvm, _root)				\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)

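/* Return true if @hpa is the root of a TDP MMU paging structure. */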
bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!kvm->arch.tdp_mmu_enabled)
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;

	return sp->tdp_mmu_page && sp->root_count;
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield);

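/*
 * Unlink @root from the list of roots, zap everything it maps, and free its
 * pages. The root's reference count must already have dropped to zero.
 */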
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);

	lockdep_assert_held(&kvm->mmu_lock);

	WARN_ON(root->root_count);
	WARN_ON(!root->tdp_mmu_page);

	list_del(&root->link);

	zap_gfn_range(kvm, root, 0, max_gfn, false);

	free_page((unsigned long)root->spt);
	kmem_cache_free(mmu_page_header_cache, root);
}

static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.gpte_is_8_bytes = true;
	role.access = ACC_ALL;

	return role;
}

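/*
 * Allocate a new page table page, and the struct kvm_mmu_page tracking it,
 * for the given gfn and level.
 */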
static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

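/*
 * Return a root for the vCPU's current MMU role, reusing an existing root
 * with a matching role if one exists, otherwise allocating a new one.
 */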
static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	spin_lock(&kvm->mmu_lock);

	/* Check for an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root) {
		if (root->role.word == role.word) {
			kvm_mmu_get_root(kvm, root);
			spin_unlock(&kvm->mmu_lock);
			return root;
		}
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	root->root_count = 1;

	list_add(&root->link, &kvm->arch.tdp_mmu_roots);

	spin_unlock(&kvm->mmu_lock);

	return root;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *root;

	root = get_tdp_mmu_vcpu_root(vcpu);
	if (!root)
		return INVALID_PAGE;

	return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level);

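/* Return the address space ID of the paging structure @sp belongs to. */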
static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return sp->role.smm ? 1 : 0;
}

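/*
 * If a previously accessed leaf SPTE is being dropped or losing its accessed
 * state, propagate the accessed bit to the primary MM.
 */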
static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_accessed_spte(new_spte) || pfn_changed))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

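/*
 * If a 4k SPTE is becoming writable, mark the GFN dirty in its memslot's
 * dirty bitmap so the write is captured by dirty logging.
 */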
static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt)
{
	struct kvm_mmu_page *sp = sptep_to_sp(pt);
	int level = sp->role.level;
	gfn_t gfn = sp->gfn;
	u64 old_child_spte;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	list_del(&sp->link);

	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		old_child_spte = READ_ONCE(*(pt + i));
		WRITE_ONCE(*(pt + i), 0);
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp),
			gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)),
			old_child_spte, 0, level - 1);
	}

	kvm_flush_remote_tlbs_with_address(kvm, gfn,
					   KVM_PAGES_PER_HPAGE(level));

	free_page((unsigned long)pt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298
/**
 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE, it is
		 * unexpected. Log the change, though it should not impact the
		 * guest since both the former and current SPTEs are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) && !is_mmio_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level));
}

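/*
 * Wrapper around __handle_changed_spte() that also handles accessed state
 * and dirty logging bookkeeping for the change.
 */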
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	u64 *root_pt = tdp_iter_root_pt(iter);
	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
	int as_id = kvm_mmu_page_as_id(root);

	lockdep_assert_held(&kvm->mmu_lock);

	WRITE_ONCE(*iter->sptep, new_spte);

	__handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
			      iter->level);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

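/* Set a TDP MMU SPTE with both accessed state and dirty logging recorded. */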
static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

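/*
 * As tdp_mmu_set_spte(), but without recording the accessed state, to avoid
 * double counting when handling an access-tracking MMU notifier.
 */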
static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

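/*
 * As tdp_mmu_set_spte(), but without recording dirty state, to avoid double
 * counting writes during dirty logging operations.
 */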
static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

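/* Iterate over every SPTE mapping GFNs [_start, _end) under @_root. */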
#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

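/* As tdp_root_for_each_pte(), but visit only present leaf SPTEs. */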
#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else

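/* Iterate over SPTEs mapping GFNs [_start, _end) from the root of @_mmu. */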
#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, it will also reset the tdp_iter's walk over the
 * paging structure and the calling function should skip to the next
 * iteration to allow the iterator to continue its traversal from the
 * paging structure root.
457 458 459 460
 *
 * Return true if this function yielded and the iterator's traversal was reset.
 * Return false if a yield was not needed.
 */
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter, bool flush)
{
	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
		if (flush)
			kvm_flush_remote_tlbs(kvm);

		cond_resched_lock(&kvm->mmu_lock);

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
			       iter->root_level, iter->min_level,
			       iter->next_last_level_gfn);

		return true;
	}

	return false;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield)
{
	struct tdp_iter iter;
	bool flush_needed = false;

	tdp_root_for_each_pte(iter, root, start, end) {
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
			flush_needed = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level.
		 */
		if ((iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);
		flush_needed = true;
	}
	return flush_needed;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
{
	struct kvm_mmu_page *root;
	bool flush = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root)
		flush |= zap_gfn_range(kvm, root, start, end, true);

	return flush;
}

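/* Zap all SPTEs in every TDP MMU root, flushing TLBs if anything was zapped. */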
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool flush;

	flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
					  int map_writable,
					  struct tdp_iter *iter,
					  kvm_pfn_t pfn, bool prefault)
{
	u64 new_spte;
	int ret = 0;
	int make_spte_ret = 0;

	if (unlikely(is_noslot_pfn(pfn))) {
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
		trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte);
	} else {
		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
					 pfn, iter->old_spte, prefault, true,
					 map_writable, !shadow_accessed_mask,
					 &new_spte);
		trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
	}

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else
		tdp_mmu_set_spte(vcpu->kvm, iter, new_spte);

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write)
			ret = RET_PF_EMULATE;
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte)))
		ret = RET_PF_EMULATE;

	trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
	if (!prefault)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
621
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int level;
	int req_level;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;
	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);
	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(iter.old_spte, gfn,
						   iter.level, &pfn, &level);

		if (iter.level == level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			tdp_mmu_set_spte(vcpu->kvm, &iter, 0);

			kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn,
					KVM_PAGES_PER_HPAGE(iter.level));

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*iter.sptep);
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
			list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);
			child_pt = sp->spt;
			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			trace_kvm_mmu_get_page(sp, true);
			if (huge_page_disallowed && req_level >= iter.level)
				account_huge_nx_page(vcpu->kvm, sp);

			tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
		}
	}

	if (WARN_ON(iter.level != level))
		return RET_PF_RETRY;

	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
					      pfn, prefault);

	return ret;
}

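/*
 * Call @handler on each TDP MMU root, for each memslot GFN range overlapping
 * the host virtual address range [start, end). Returns the OR of all the
 * handler return values.
 */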
static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
		unsigned long end, unsigned long data,
		int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot,
			       struct kvm_mmu_page *root, gfn_t start,
			       gfn_t end, unsigned long data))
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_mmu_page *root;
	int ret = 0;
	int as_id;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		as_id = kvm_mmu_page_as_id(root);
		slots = __kvm_memslots(kvm, as_id);
		kvm_for_each_memslot(memslot, slots) {
			unsigned long hva_start, hva_end;
			gfn_t gfn_start, gfn_end;

			hva_start = max(start, memslot->userspace_addr);
			hva_end = min(end, memslot->userspace_addr +
				      (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;
			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

			ret |= handler(kvm, memslot, root, gfn_start,
				       gfn_end, data);
		}
	}

	return ret;
}

static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     struct kvm_mmu_page *root, gfn_t start,
				     gfn_t end, unsigned long unused)
{
	return zap_gfn_range(kvm, root, start, end, false);
}

int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
					    zap_gfn_range_hva_wrapper);
}

/*
 * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
 * if any of the GFNs in the range have been accessed.
 */
static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
			 struct kvm_mmu_page *root, gfn_t start, gfn_t end,
			 unsigned long unused)
{
	struct tdp_iter iter;
	int young = 0;
	u64 new_spte = 0;

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
		/*
		 * If we have a non-accessed entry we don't need to change the
		 * pte.
		 */
		if (!is_accessed_spte(iter.old_spte))
			continue;

		new_spte = iter.old_spte;

		if (spte_ad_enabled(new_spte)) {
			clear_bit((ffs(shadow_accessed_mask) - 1),
				  (unsigned long *)&new_spte);
		} else {
			/*
			 * Capture the dirty status of the page, so that it doesn't get
			 * lost when the SPTE is marked for access tracking.
			 */
			if (is_writable_pte(new_spte))
				kvm_set_pfn_dirty(spte_to_pfn(new_spte));

			new_spte = mark_spte_for_access_track(new_spte);
		}
		new_spte &= ~shadow_dirty_mask;

		tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
		young = 1;

		trace_kvm_age_page(iter.gfn, iter.level, slot, young);
	}

	return young;
}

int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
					    age_gfn_range);
}

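/* Return non-zero if the SPTE mapping @gfn is marked accessed. */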
static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
			struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
			unsigned long unused2)
{
	struct tdp_iter iter;

	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
		if (is_accessed_spte(iter.old_spte))
			return 1;

	return 0;
}

int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0,
					    test_age_gfn);
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
			struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
			unsigned long data)
{
	struct tdp_iter iter;
	pte_t *ptep = (pte_t *)data;
	kvm_pfn_t new_pfn;
	u64 new_spte;
	int need_flush = 0;

	WARN_ON(pte_huge(*ptep));

	new_pfn = pte_pfn(*ptep);

	tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
		if (iter.level != PG_LEVEL_4K)
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			break;

		tdp_mmu_set_spte(kvm, &iter, 0);

		kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);

		if (!pte_write(*ptep)) {
			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
					iter.old_spte, new_pfn);

			tdp_mmu_set_spte(kvm, &iter, new_spte);
		}

		need_flush = 1;
	}

	if (need_flush)
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);

	return 0;
}

int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
			     pte_t *host_ptep)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1,
					    (unsigned long)host_ptep,
					    set_tdp_spte);
}

/*
 * Remove write access from all the SPTEs mapping GFNs [start, end). Only
 * leaf SPTEs down to min_level are write-protected; e.g. with min_level
 * above PG_LEVEL_4K, SPTEs that map 4k pages are left writable.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			     gfn_t start, gfn_t end, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level) ||
		    !(iter.old_spte & PT_WRITABLE_MASK))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
		spte_set = true;
	}
	return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
			     slot->base_gfn + slot->npages, min_level);
	}

	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			   gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
			continue;

		if (spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
		spte_set = true;
	}
	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
				slot->base_gfn + slot->npages);
	}

	return spte_set;
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	struct tdp_iter iter;
	u64 new_spte;

	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
				    gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		mask &= ~(1UL << (iter.gfn - gfn));

		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
	}
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;
	int root_as_id;

	lockdep_assert_held(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
	}
}

/*
 * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
 * only used for PML, and so will involve setting the dirty bit on each SPTE.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	tdp_root_for_each_pte(iter, root, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    iter.old_spte & shadow_dirty_mask)
			continue;

		new_spte = iter.old_spte | shadow_dirty_mask;

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	return spte_set;
}

/*
 * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
 * only used for PML, and so will involve setting the dirty bit on each SPTE.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
				slot->base_gfn + slot->npages);
	}
	return spte_set;
}

/*
 * Clear leaf entries which could be replaced by large mappings, for
 * GFNs within the slot.
 */
static void zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	kvm_pfn_t pfn;
	bool spte_set = false;

	tdp_root_for_each_pte(iter, root, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
			spte_set = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		pfn = spte_to_pfn(iter.old_spte);
		if (kvm_is_reserved_pfn(pfn) ||
		    !PageTransCompoundMap(pfn_to_page(pfn)))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);

1133
		spte_set = true;
1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149
	}

	if (spte_set)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Clear leaf entries which could be replaced by large mappings, for
 * GFNs within the slot.
 */
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	int root_as_id;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		zap_collapsible_spte_range(kvm, root, slot->base_gfn,
					   slot->base_gfn + slot->npages);
	}
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
		if (!is_writable_pte(iter.old_spte))
			break;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	lockdep_assert_held(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= write_protect_gfn(kvm, root, gfn);
	}
	return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->shadow_root_level;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	return leaf;
}