// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#include <asm/cmpxchg.h>
#include <trace/events/kvm.h>

static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
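
/*
 * Note: the module parameter is writable at runtime (0644), but each VM
 * latches the value into kvm->arch.tdp_mmu_enabled at creation time (see
 * below), so toggling tdp_mmu only affects VMs created after the change.
 */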

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

	/*
	 * Ensure that all the outstanding RCU callbacks to free shadow pages
	 * can run before the VM is torn down.
	 */
	rcu_barrier();
}

static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	if (kvm_mmu_put_root(kvm, root))
		kvm_tdp_mmu_free_root(kvm, root);
}

static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
					   struct kvm_mmu_page *root)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
		return false;

	kvm_mmu_get_root(kvm, root);
	return true;
}

static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	struct kvm_mmu_page *next_root;

	next_root = list_next_entry(root, link);
	tdp_mmu_put_root(kvm, root);
	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)				\
	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
				      typeof(*_root), link);		\
	     tdp_mmu_next_root_valid(_kvm, _root);			\
	     _root = tdp_mmu_next_root(_kvm, _root))
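
/*
 * Illustrative usage (sketch; done_with_root() is a hypothetical predicate):
 * a caller that exits the loop early must drop the reference taken on the
 * current root, for example:
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root) {
 *		if (done_with_root(root)) {
 *			tdp_mmu_put_root(kvm, root);
 *			break;
 *		}
 *	}
 */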

#define for_each_tdp_mmu_root(_kvm, _root)				\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
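
/*
 * Note: unlike the yield-safe variant above, this iterator takes no
 * references on the roots, so it must only be used while the MMU lock is
 * held across the entire walk and the loop body never yields.
 */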

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield);

void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);

	lockdep_assert_held_write(&kvm->mmu_lock);

	WARN_ON(root->root_count);
	WARN_ON(!root->tdp_mmu_page);

	list_del(&root->link);

	zap_gfn_range(kvm, root, 0, max_gfn, false);

	free_page((unsigned long)root->spt);
	kmem_cache_free(mmu_page_header_cache, root);
}

static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.gpte_is_8_bytes = true;
	role.access = ACC_ALL;

	return role;
}

static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role.word = page_role_for_level(vcpu, level).word;
	sp->gfn = gfn;
	sp->tdp_mmu_page = true;

	trace_kvm_mmu_get_page(sp, true);

	return sp;
}

static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	write_lock(&kvm->mmu_lock);

	/* Check for an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root) {
		if (root->role.word == role.word) {
			kvm_mmu_get_root(kvm, root);
			write_unlock(&kvm->mmu_lock);
			return root;
		}
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	root->root_count = 1;

	list_add(&root->link, &kvm->arch.tdp_mmu_roots);

	write_unlock(&kvm->mmu_lock);

	return root;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *root;

	root = get_tdp_mmu_vcpu_root(vcpu);
	if (!root)
		return INVALID_PAGE;

	return __pa(root->spt);
}

static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/*
 * This is called through call_rcu in order to free TDP page table memory
 * safely with respect to other kernel threads that may be operating on
 * the memory.
 * Because TDP MMU page table memory is only accessed in RCU read-side
 * critical sections and is only freed after a grace period, lockless
 * walkers will not use that memory after it has been freed.
 */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared);

static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return sp->role.smm ? 1 : 0;
}

static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
		return;

	if (is_accessed_spte(old_spte) &&
	    (!is_accessed_spte(new_spte) || pfn_changed))
		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
					  u64 old_spte, u64 new_spte, int level)
{
	bool pfn_changed;
	struct kvm_memory_slot *slot;

	if (level > PG_LEVEL_4K)
		return;

	pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	if ((!is_writable_pte(old_spte) || pfn_changed) &&
	    is_writable_pte(new_spte)) {
		slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
		mark_page_dirty_in_slot(kvm, slot, gfn);
	}
}

/**
 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 * @account_nx: This page replaces a NX large page and should be marked for
 *		eventual reclaim.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool shared, bool account_nx)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
	if (account_nx)
		account_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be adding or removing pages.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				bool shared)
{
	if (shared)
		spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);

	list_del(&sp->link);
	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	if (shared)
		spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
 * @kvm: kvm instance
 * @pt: the page removed from the paging structure
 * @shared: This operation may not be running under the exclusive use
 *	    of the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Given a page table that has been removed from the TDP paging structure,
 * iterates through the page table to clear SPTEs and free child page tables.
 */
static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
					bool shared)
{
	struct kvm_mmu_page *sp = sptep_to_sp(pt);
	int level = sp->role.level;
	gfn_t base_gfn = sp->gfn;
	u64 old_child_spte;
	u64 *sptep;
	gfn_t gfn;
	int i;

	trace_kvm_mmu_prepare_zap_page(sp);

	tdp_mmu_unlink_page(kvm, sp, shared);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		sptep = pt + i;
		gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));

		if (shared) {
			/*
			 * Set the SPTE to a nonpresent value that other
			 * threads will not overwrite. If the SPTE was
			 * already marked as removed then another thread
			 * handling a page fault could overwrite it, so
			 * retry the exchange until this thread is the one
			 * that changes the SPTE from some other value to
			 * the removed SPTE value.
			 */
			for (;;) {
				old_child_spte = xchg(sptep, REMOVED_SPTE);
				if (!is_removed_spte(old_child_spte))
					break;
				cpu_relax();
			}
		} else {
			/*
			 * If the SPTE is not MMU-present, there is no backing
			 * page associated with the SPTE and so no side effects
			 * that need to be recorded, and exclusive ownership of
			 * mmu_lock ensures the SPTE can't be made present.
			 * Note, zapping MMIO SPTEs is also unnecessary as they
			 * are guarded by the memslots generation, not by being
			 * unreachable.
			 */
			old_child_spte = READ_ONCE(*sptep);
			if (!is_shadow_present_pte(old_child_spte))
				continue;

			/*
			 * Marking the SPTE as a removed SPTE is not
			 * strictly necessary here as the MMU lock will
			 * stop other threads from concurrently modifying
			 * this SPTE. Using the removed SPTE value keeps
			 * the two branches consistent and simplifies
			 * the function.
			 */
			WRITE_ONCE(*sptep, REMOVED_SPTE);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
				    old_child_spte, REMOVED_SPTE, level - 1,
				    shared);
	}

	kvm_flush_remote_tlbs_with_address(kvm, gfn,
					   KVM_PAGES_PER_HPAGE(level));

	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}

/**
 * handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 * @shared: This operation may not be running under the exclusive use of
 *	    the MMU lock and the operation must synchronize with other
 *	    threads that might be modifying SPTEs.
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				  u64 old_spte, u64 new_spte, int level,
				  bool shared)
{
	bool was_present = is_shadow_present_pte(old_spte);
	bool is_present = is_shadow_present_pte(new_spte);
	bool was_leaf = was_present && is_last_spte(old_spte, level);
	bool is_leaf = is_present && is_last_spte(new_spte, level);
	bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
	WARN_ON(level < PG_LEVEL_4K);
	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

	/*
	 * If this warning were to trigger it would indicate that there was a
	 * missing MMU notifier or a race with some notifier handler.
	 * A present, leaf SPTE should never be directly replaced with another
	 * present leaf SPTE pointing to a different PFN. A notifier handler
	 * should be zapping the SPTE before the main MM's page table is
	 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
	 * thread before replacement.
	 */
	if (was_leaf && is_leaf && pfn_changed) {
		pr_err("Invalid SPTE change: cannot replace a present leaf\n"
		       "SPTE with another present leaf SPTE mapping a\n"
		       "different PFN!\n"
		       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
		       as_id, gfn, old_spte, new_spte, level);

		/*
		 * Crash the host to prevent error propagation and guest data
		 * corruption.
		 */
		BUG();
	}

	if (old_spte == new_spte)
		return;

	trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);

	/*
	 * The only times a SPTE should be changed from a non-present to
	 * non-present state is when an MMIO entry is installed/modified/
	 * removed. In that case, there is nothing to do here.
	 */
	if (!was_present && !is_present) {
		/*
		 * If this change does not involve a MMIO SPTE or removed SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON(!is_mmio_spte(old_spte) &&
			    !is_mmio_spte(new_spte) &&
			    !is_removed_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
			       "a temporary removed SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
	}

	if (was_leaf && is_dirty_spte(old_spte) &&
	    (!is_dirty_spte(new_spte) || pfn_changed))
		kvm_set_pfn_dirty(spte_to_pfn(old_spte));

	/*
	 * Recursively handle child PTs if the change removed a subtree from
	 * the paging structure.
	 */
	if (was_present && !was_leaf && (pfn_changed || !is_present))
		handle_removed_tdp_mmu_page(kvm,
				spte_to_child_pt(old_spte, level), shared);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
				u64 old_spte, u64 new_spte, int level,
				bool shared)
{
	__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
			      shared);
	handle_changed_spte_acc_track(old_spte, new_spte, level);
	handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
				      new_spte, level);
}

/*
 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
 * associated bookkeeping
 *
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * Returns: true if the SPTE was set, false if it was not. If false is returned,
 *	    this function will have no side-effects.
 */
static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter,
					   u64 new_spte)
{
	u64 *root_pt = tdp_iter_root_pt(iter);
	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
	int as_id = kvm_mmu_page_as_id(root);

	lockdep_assert_held_read(&kvm->mmu_lock);

	/*
	 * Do not change removed SPTEs. Only the thread that froze the SPTE
	 * may modify it.
	 */
	if (iter->old_spte == REMOVED_SPTE)
		return false;

	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;

	handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
			    iter->level, true);

	return true;
}
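
/*
 * Illustrative caller pattern (sketch): a failed exchange means another
 * thread changed the SPTE first, so callers running under the MMU read lock
 * typically back off and retry the whole operation, as the page fault path
 * below does:
 *
 *	if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
 *		return RET_PF_RETRY;
 */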

static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
					   struct tdp_iter *iter)
{
	/*
	 * Freeze the SPTE by setting it to a special,
	 * non-present value. This will stop other threads from
	 * immediately installing a present entry in its place
	 * before the TLBs are flushed.
	 */
	if (!tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE))
		return false;

	kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
					   KVM_PAGES_PER_HPAGE(iter->level));

	/*
	 * No other thread can overwrite the removed SPTE as they
	 * must either wait on the MMU lock or use
	 * tdp_mmu_set_spte_atomic which will not overwrite the
	 * special removed SPTE value. No bookkeeping is needed
	 * here since the SPTE is going from non-present
	 * to non-present.
	 */
	WRITE_ONCE(*iter->sptep, 0);

	return true;
}


/*
 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
 * @kvm: kvm instance
 * @iter: a tdp_iter instance currently on the SPTE that should be set
 * @new_spte: The value the SPTE should be set to
 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
 *		      of the page. Should be set unless handling an MMU
 *		      notifier for access tracking. Leaving record_acc_track
 *		      unset in that case prevents page accesses from being
 *		      double counted.
 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
 *		      appropriate for the change being made. Should be set
 *		      unless performing certain dirty logging operations.
 *		      Leaving record_dirty_log unset in that case prevents page
 *		      writes from being double counted.
 */
static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	tdp_ptep_t root_pt = tdp_iter_root_pt(iter);
	struct kvm_mmu_page *root = sptep_to_sp(root_pt);
	int as_id = kvm_mmu_page_as_id(root);

	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * No thread should be using this function to set SPTEs to the
	 * temporary removed SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
	 * use of the removed SPTE should not be necessary.
	 */
	WARN_ON(iter->old_spte == REMOVED_SPTE);

	WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);

	__handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
			      iter->level, false);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				    u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}
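
/*
 * Note: tdp_mmu_set_spte() and the _no_acc_track/_no_dirty_log variants below
 * require the MMU lock to be held for write (see the lockdep assertion in
 * __tdp_mmu_set_spte()); threads holding the lock for read must use
 * tdp_mmu_set_spte_atomic() instead.
 */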

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
						 struct tdp_iter *iter,
						 u64 new_spte)
{
	__tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
	for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)	\
	tdp_root_for_each_pte(_iter, _root, _start, _end)		\
		if (!is_shadow_present_pte(_iter.old_spte) ||		\
		    !is_last_spte(_iter.old_spte, _iter.level))		\
			continue;					\
		else
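
/*
 * Note: the "leaf" variant above skips SPTEs that are either not
 * shadow-present or not last-level, so the loop body only ever sees present
 * leaf mappings.
 */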

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)		\
	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
			 _mmu->shadow_root_level, _start, _end)

/*
 * Yield if the MMU lock is contended or this thread needs to return control
 * to the scheduler.
 *
 * If this function should yield and flush is set, it will perform a remote
 * TLB flush before yielding.
 *
 * If this function yields, it will also reset the tdp_iter's walk over the
 * paging structure and the calling function should skip to the next
 * iteration to allow the iterator to continue its traversal from the
 * paging structure root.
 *
 * Return true if this function yielded and the iterator's traversal was reset.
 * Return false if a yield was not needed.
 */
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter, bool flush)
{
	/* Ensure forward progress has been made before yielding. */
	if (iter->next_last_level_gfn == iter->yielded_gfn)
		return false;

	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		rcu_read_unlock();

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		cond_resched_rwlock_write(&kvm->mmu_lock);
		rcu_read_lock();

		WARN_ON(iter->gfn > iter->next_last_level_gfn);

		tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
			       iter->root_level, iter->min_level,
			       iter->next_last_level_gfn);

		return true;
	}

	return false;
}
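
/*
 * Illustrative caller pattern (sketch): callers that batch a TLB flush pass
 * their pending-flush flag as @flush and clear it when this helper reports
 * that it yielded (and therefore already flushed), as zap_gfn_range() does:
 *
 *	if (can_yield && tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
 *		flush_needed = false;
 *		continue;
 *	}
 */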

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield)
{
	struct tdp_iter iter;
	bool flush_needed = false;

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
		if (can_yield &&
		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
			flush_needed = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte))
			continue;

		/*
		 * If this is a non-last-level SPTE that covers a larger range
		 * than should be zapped, continue, and zap the mappings at a
		 * lower level.
		 */
		if ((iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);
		flush_needed = true;
	}

	rcu_read_unlock();
	return flush_needed;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
{
	struct kvm_mmu_page *root;
	bool flush = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root)
		flush |= zap_gfn_range(kvm, root, start, end, true);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool flush;

	flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
	if (flush)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
					  int map_writable,
					  struct tdp_iter *iter,
					  kvm_pfn_t pfn, bool prefault)
{
	u64 new_spte;
	int ret = 0;
	int make_spte_ret = 0;

	if (unlikely(is_noslot_pfn(pfn)))
		new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
	else
		make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
					 pfn, iter->old_spte, prefault, true,
					 map_writable, !shadow_accessed_mask,
					 &new_spte);

	if (new_spte == iter->old_spte)
		ret = RET_PF_SPURIOUS;
	else if (!tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
		return RET_PF_RETRY;

	/*
	 * If the page fault was caused by a write but the page is write
	 * protected, emulation is needed. If the emulation was skipped,
	 * the vCPU would have the same fault again.
	 */
	if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write)
			ret = RET_PF_EMULATE;
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	/* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
	if (unlikely(is_mmio_spte(new_spte))) {
		trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
				     new_spte);
		ret = RET_PF_EMULATE;
	} else
		trace_kvm_mmu_set_spte(iter->level, iter->gfn,
				       rcu_dereference(iter->sptep));

	trace_kvm_mmu_set_spte(iter->level, iter->gfn,
			       rcu_dereference(iter->sptep));
	if (!prefault)
		vcpu->stat.pf_fixed++;

	return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	struct tdp_iter iter;
	struct kvm_mmu_page *sp;
	u64 *child_pt;
	u64 new_spte;
	int ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int level;
	int req_level;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;
	if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(iter.old_spte, gfn,
						   iter.level, &pfn, &level);

		if (iter.level == level)
			break;

		/*
		 * If there is an SPTE mapping a large page at a higher level
		 * than the target, that SPTE must be cleared and replaced
		 * with a non-leaf SPTE.
		 */
		if (is_shadow_present_pte(iter.old_spte) &&
		    is_large_pte(iter.old_spte)) {
			if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
				break;

			/*
			 * The iter must explicitly re-read the spte here
			 * because the new value informs the !present
			 * path below.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
		}

		if (!is_shadow_present_pte(iter.old_spte)) {
			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
			child_pt = sp->spt;

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			if (tdp_mmu_set_spte_atomic(vcpu->kvm, &iter,
						    new_spte)) {
				tdp_mmu_link_page(vcpu->kvm, sp, true,
						  huge_page_disallowed &&
						  req_level >= iter.level);

				trace_kvm_mmu_get_page(sp, true);
			} else {
				tdp_mmu_free_sp(sp);
				break;
			}
		}
	}

	if (iter.level != level) {
		rcu_read_unlock();
		return RET_PF_RETRY;
	}

	ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
					      pfn, prefault);
	rcu_read_unlock();

	return ret;
}

static __always_inline int
kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
			     unsigned long start,
			     unsigned long end,
			     unsigned long data,
			     int (*handler)(struct kvm *kvm,
					    struct kvm_memory_slot *slot,
					    struct kvm_mmu_page *root,
					    gfn_t start,
					    gfn_t end,
					    unsigned long data))
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_mmu_page *root;
	int ret = 0;
	int as_id;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		as_id = kvm_mmu_page_as_id(root);
		slots = __kvm_memslots(kvm, as_id);
		kvm_for_each_memslot(memslot, slots) {
			unsigned long hva_start, hva_end;
			gfn_t gfn_start, gfn_end;

			hva_start = max(start, memslot->userspace_addr);
			hva_end = min(end, memslot->userspace_addr +
				      (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;
			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

			ret |= handler(kvm, memslot, root, gfn_start,
				       gfn_end, data);
		}
	}

	return ret;
}
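
/*
 * Note: the handler return values are OR'ed together across all roots and
 * memslots, so a handler reports its result (e.g. "flush needed" or "page
 * was young") by returning non-zero, as the wrappers below do.
 */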

static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     struct kvm_mmu_page *root, gfn_t start,
				     gfn_t end, unsigned long unused)
{
	return zap_gfn_range(kvm, root, start, end, false);
}

int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
					    zap_gfn_range_hva_wrapper);
}

/*
 * Mark the SPTEs mapping the range of GFNs [start, end) unaccessed and return
 * non-zero if any of the GFNs in the range have been accessed.
 */
static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
			 struct kvm_mmu_page *root, gfn_t start, gfn_t end,
			 unsigned long unused)
{
	struct tdp_iter iter;
	int young = 0;
	u64 new_spte = 0;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
		/*
		 * If we have a non-accessed entry we don't need to change the
		 * pte.
		 */
		if (!is_accessed_spte(iter.old_spte))
			continue;

		new_spte = iter.old_spte;

		if (spte_ad_enabled(new_spte)) {
			clear_bit((ffs(shadow_accessed_mask) - 1),
				  (unsigned long *)&new_spte);
		} else {
			/*
			 * Capture the dirty status of the page, so that it doesn't get
			 * lost when the SPTE is marked for access tracking.
			 */
			if (is_writable_pte(new_spte))
				kvm_set_pfn_dirty(spte_to_pfn(new_spte));

			new_spte = mark_spte_for_access_track(new_spte);
		}
		new_spte &= ~shadow_dirty_mask;

		tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
		young = 1;

		trace_kvm_age_page(iter.gfn, iter.level, slot, young);
	}

	rcu_read_unlock();

	return young;
}

int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
					    age_gfn_range);
}

static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
			struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
			unsigned long unused2)
{
	struct tdp_iter iter;

	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
		if (is_accessed_spte(iter.old_spte))
			return 1;

	return 0;
}

int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0,
					    test_age_gfn);
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
			struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
			unsigned long data)
{
	struct tdp_iter iter;
	pte_t *ptep = (pte_t *)data;
	kvm_pfn_t new_pfn;
	u64 new_spte;
	int need_flush = 0;

	rcu_read_lock();

	WARN_ON(pte_huge(*ptep));

	new_pfn = pte_pfn(*ptep);

	tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
		if (iter.level != PG_LEVEL_4K)
			continue;

		if (!is_shadow_present_pte(iter.old_spte))
			break;

		tdp_mmu_set_spte(kvm, &iter, 0);

		kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);

		if (!pte_write(*ptep)) {
			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
					iter.old_spte, new_pfn);

			tdp_mmu_set_spte(kvm, &iter, new_spte);
		}

		need_flush = 1;
	}

	if (need_flush)
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);

	rcu_read_unlock();

	return 0;
}

int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
			     pte_t *host_ptep)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1,
					    (unsigned long)host_ptep,
					    set_tdp_spte);
}

/*
 * Remove write access from all the SPTEs mapping GFNs [start, end). Only
 * leaf SPTEs down to min_level are write-protected.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			     gfn_t start, gfn_t end, int min_level)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
				   min_level, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
			continue;

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level) ||
		    !(iter.old_spte & PT_WRITABLE_MASK))
			continue;

		new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
			     slot->base_gfn + slot->npages, min_level);
	}

	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			   gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
			continue;

		if (spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();
	return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
				slot->base_gfn + slot->npages);
	}

	return spte_set;
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t gfn, unsigned long mask, bool wrprot)
{
	struct tdp_iter iter;
	u64 new_spte;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
				    gfn + BITS_PER_LONG) {
		if (!mask)
			break;

		if (iter.level > PG_LEVEL_4K ||
		    !(mask & (1UL << (iter.gfn - gfn))))
			continue;

		mask &= ~(1UL << (iter.gfn - gfn));

		if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
			if (is_writable_pte(iter.old_spte))
				new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
			else
				continue;
		} else {
			if (iter.old_spte & shadow_dirty_mask)
				new_spte = iter.old_spte & ~shadow_dirty_mask;
			else
				continue;
		}

		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
	}

	rcu_read_unlock();
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot)
{
	struct kvm_mmu_page *root;
	int root_as_id;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
	}
}

/*
 * Clear leaf entries which could be replaced by large mappings, for
 * GFNs within the slot.
 */
static void zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       struct kvm_memory_slot *slot)
{
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	kvm_pfn_t pfn;
	bool spte_set = false;

	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
			spte_set = false;
			continue;
		}

		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		pfn = spte_to_pfn(iter.old_spte);
		if (kvm_is_reserved_pfn(pfn) ||
		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
							    pfn, PG_LEVEL_NUM))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);

		spte_set = true;
	}

	rcu_read_unlock();
	if (spte_set)
		kvm_flush_remote_tlbs(kvm);
}

/*
 * Zap the leaf entries which could be replaced by large mappings, for GFNs
 * within the slot.
 */
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       struct kvm_memory_slot *slot)
{
	struct kvm_mmu_page *root;
	int root_as_id;

	for_each_tdp_mmu_root_yield_safe(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		zap_collapsible_spte_range(kvm, root, slot);
	}
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
			      gfn_t gfn)
{
	struct tdp_iter iter;
	u64 new_spte;
	bool spte_set = false;

	rcu_read_lock();

	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
		if (!is_writable_pte(iter.old_spte))
			break;

		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	lockdep_assert_held_write(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;

		spte_set |= write_protect_gfn(kvm, root, gfn);
	}
	return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	int leaf = -1;

	*root_level = vcpu->arch.mmu->shadow_root_level;

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	rcu_read_unlock();

	return leaf;
}
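
/*
 * Note: sptes[] is indexed by paging-structure level; the walk fills entries
 * from *root_level down to the returned leaf level, and returns -1 if no
 * SPTE was recorded for the address.
 */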