/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
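
/*
 * For illustration only: the includer (arch/x86/kvm/mmu.c) is expected to
 * instantiate this template once per guest PTE format, roughly like:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */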

/*
 * This is used to catch non-optimized PT_GUEST_(DIRTY|ACCESS)_SHIFT macro
 * uses for the EPT paging type, which has no A/D bits.
 */
extern u64 __pure __using_nonexistent_pte_bit(void)
	       __compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT");

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
	#define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
	#define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define CMPXCHG cmpxchg
#elif PTTYPE == PTTYPE_EPT
	#define pt_element_t u64
	#define guest_walker guest_walkerEPT
	#define FNAME(name) ept_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_ACCESSED_MASK 0
	#define PT_GUEST_DIRTY_MASK 0
	#define PT_GUEST_DIRTY_SHIFT __using_nonexistent_pte_bit()
	#define PT_GUEST_ACCESSED_SHIFT __using_nonexistent_pte_bit()
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 4
#else
	#error Invalid PTTYPE value
#endif
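
/*
 * A note for readers: FNAME() gives each instantiation its own namespace,
 * e.g. FNAME(page_fault) expands to paging64_page_fault,
 * paging32_page_fault or ept_page_fault depending on PTTYPE.
 */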

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
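/*
 * The per-level arrays below are indexed by (level - 1), so e.g.
 * walker->ptes[0] would hold the level-1 (4K pte) step of the walk.
 */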
struct guest_walker {
	int level;
	unsigned max_level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	bool pte_writable[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
{
	unsigned mask;

	/* dirty bit is not supported, so no need to track it */
	if (!PT_GUEST_DIRTY_MASK)
		return;

	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
}
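
/*
 * Worked example for the shift above, for the non-EPT formats
 * (PT_GUEST_DIRTY_SHIFT == 6, PT_WRITABLE_SHIFT == 1): "gpte >> 5" moves
 * the dirty bit into the writable bit's position, so write access is
 * kept only for dirty gptes.
 */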

static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
	return is_present_gpte(pte);
#else
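	/*
	 * EPT entries have no present bit: an entry is present if any of
	 * the R/W/X permission bits (2:0) is set.
	 */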
	return pte & 7;
#endif
}
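
/*
 * FNAME(cmpxchg_gpte) below updates a guest pte in place the way hardware
 * sets accessed/dirty bits: with an atomic compare-and-exchange.  A
 * non-zero return means the gpte changed under us; the caller
 * (update_accessed_dirty_bits) propagates that so the walk is retried.
 */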

static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Check if the user is doing something meaningless. */
	if (unlikely(npages != 1))
		return -EFAULT;

	table = kmap_atomic(page);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *spte,
				  u64 gpte)
{
	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	if (!FNAME(is_present_gpte)(gpte))
		goto no_present;

	/* if the accessed bit is not supported, prefetch non-accessed gptes too */
	if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}

static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned access;
#if PTTYPE == PTTYPE_EPT
	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
		ACC_USER_MASK;
#else
	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
	access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
	/* Combine NX with P (which is set here) to get ACC_EXEC_MASK.  */
	access ^= (gpte >> PT64_NX_SHIFT);
#endif

	return access;
}
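
/*
 * Example of the NX trick above: ACC_EXEC_MASK equals PT_PRESENT_MASK
 * (bit 0), which is always set in a present gpte, so XOR-ing in the NX
 * bit (gpte >> PT64_NX_SHIFT) clears exec access exactly when NX is set.
 */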

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	/* dirty/accessed bits are not supported, so no need to update them */
	if (!PT_GUEST_DIRTY_MASK)
		return 0;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_GUEST_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault &&
				!(pte & PT_GUEST_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		/*
		 * If the slot is read-only, simply do not process the accessed
		 * and dirty bits.  This is the correct thing to do if the slot
		 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
		 * are only supported if the accessed and dirty bits are already
		 * set in the ROM (so that MMIO writes are never needed).
		 *
		 * Note that NPT does not allow this at all and faults, since
		 * it always wants nested page table entries for the guest
		 * page tables to be writable.  And EPT works but will simply
		 * overwrite the read-only memory to set the accessed and dirty
		 * bits.
		 */
		if (unlikely(!walker->pte_writable[level - 1]))
			continue;

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		if (ret)
			return ret;

		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}

static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned pkeys = 0;
#if PTTYPE == 64
	pte_t pte = {.pte = gpte};

	pkeys = pte_flags_pkey(pte_flags(pte));
#endif
	return pkeys;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
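/*
 * This is a software version of the hardware walk: one gpte is read per
 * level, and each level's table gfn, gpte and user-space address are
 * recorded in *walker so callers can re-check or update them later.
 */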
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *uninitialized_var(ptep_user);
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey;
	gpa_t pte_gpa;
	int offset;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte           = mmu->get_cr3(vcpu);

#if PTTYPE == 64
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

	accessed_dirty = PT_GUEST_ACCESSED_MASK;
	pt_access = pte_access = ACC_ALL;
	++walker->level;

	do {
		gfn_t real_gfn;
		unsigned long host_addr;

		pt_access &= pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		offset    = index * sizeof(pt_element_t);
		pte_gpa   = gfn_to_gpa(table_gfn) + offset;
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      PFERR_USER_MASK|PFERR_WRITE_MASK,
					      &walker->fault);

		/*
		 * FIXME: This can happen if emulation (e.g. of an INS/OUTS
		 * instruction) triggers a nested page fault.  The exit
		 * qualification / exit info field will incorrectly have
		 * "guest page access" as the nested page fault's cause,
		 * instead of "guest page structure access".  To fix this,
		 * the x86_exception struct should be augmented with enough
		 * information to fix the exit_qualification or exit_info_1
		 * fields.
		 */
		if (unlikely(real_gfn == UNMAPPED_GVA))
			return 0;

		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
					    &walker->pte_writable[walker->level - 1]);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

		if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		accessed_dirty &= pte;
		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));

	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
	errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
	if (unlikely(errcode))
		goto error;

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		FNAME(protect_clean_gpte)(&pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bit support, accessed_dirty is
		 * always clear.
		 */
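		/*
		 * Concretely, for the non-EPT formats PT_GUEST_DIRTY_SHIFT is
		 * 6 and PT_GUEST_ACCESSED_SHIFT is 5, so the "pte >> 1" below
		 * lines the dirty bit up with PT_GUEST_ACCESSED_MASK.
		 */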
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
	/*
	 * Use PFERR_RSVD_MASK in error_code to tell if an EPT
	 * misconfiguration needs to be injected. The detection is
	 * done by is_rsvd_bits_set() above.
	 *
	 * We set up the value of exit_qualification to inject:
	 * [2:0] - Derived from [2:0] of real exit_qualification at EPT violation
	 * [5:3] - Calculated by the page walk of the guest EPT page tables
	 * [8:7] - Derived from [8:7] of real exit_qualification
	 *
	 * The other bits are set to 0.
	 */
	if (!(errcode & PFERR_RSVD_MASK)) {
		vcpu->arch.exit_qualification &= 0x187;
		vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
	}
#endif
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
}

#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
#endif

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
	unsigned pte_access;
	gfn_t gfn;
	kvm_pfn_t pfn;

	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
	FNAME(protect_clean_gpte)(&pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
		return false;

	/*
	 * we call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	 */
	mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
		     true, true);

	return true;
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte = *(const pt_element_t *)pte;

	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; we return 1 to indicate this case.
 */
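/*
 * The fetch below runs in two phases: the first loop mirrors the guest's
 * remaining indirect levels with shadow pages, re-checking each gpte via
 * FNAME(gpte_changed); the second builds direct shadow pages below
 * gw->level, e.g. when a guest large page must be mapped with smaller
 * host pages.
 */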
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int write_fault, int hlevel,
			 kvm_pfn_t pfn, bool map_writable, bool prefault)
{
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned direct_access, access = gw->pt_access;
	int top_level, emulate;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(vcpu, it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		clear_sp_write_flooding_count(it.sptep);
		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access);
		link_shadow_page(vcpu, it.sptep, sp);
	}

	clear_sp_write_flooding_count(it.sptep);
	emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
			       it.level, gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return emulate;

out_gpte_changed:
	kvm_release_pfn_clean(pfn);
	return 0;
}

/*
 * Check whether the faulting gfn lies in a large page that also contains
 * one of the guest page tables used by the current walk.
 *
 * This is a helper for FNAME(page_fault).  When the guest uses a large,
 * writable page to map a gfn that is itself in use as a guest page table,
 * we force kvm to map it with small pages: the shadow page that kvm
 * creates for that page table will be write protected, which stops kvm
 * from using a large page anyway.  Doing this early avoids unnecessary
 * #PFs and emulation.
 *
 * @write_fault_to_shadow_pgtable will return true if the fault gfn is
 * currently used as its page table.
 *
 * Note: the PDPT page table is not checked for PAE-32 bit guests.  That
 * is fine, since the PDPT is always shadowed; it means we can never use
 * a large page to map the gfn that is used as the PDPT.
 */
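/*
 * For example, with a 2M walker->level mapping, mask clears the low nine
 * gfn bits, so after the XOR below "!(gfn & mask)" is true exactly when
 * the faulting gfn and a table gfn fall in the same large-page frame.
 */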
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, int user_fault,
			      bool *write_fault_to_shadow_pgtable)
{
	int level;
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_write_protection(vcpu) && !user_fault)))
		return false;

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
	}

	return self_changed;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int r;
	kvm_pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	bool force_pt_level = false;
	unsigned long mmu_seq;
	bool map_writable, is_self_change_mapping;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * If PFEC.RSVD is set, this is a shadow page fault.
	 * The bit needs to be cleared before walking guest page tables.
	 */
	error_code &= ~PFERR_RSVD_MASK;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			inject_page_fault(vcpu, &walker.fault);

		return 0;
	}

	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
		shadow_page_table_clear_flood(vcpu, addr);
		return 1;
	}

	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
		if (likely(!force_pt_level)) {
			level = min(walker.level, level);
			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
		}
	} else
		force_pt_level = true;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return 0;

	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
				walker.gfn, pfn, walker.pte_access, &r))
		return r;

	/*
	 * Do not change pte_access if the pfn is a mmio page, otherwise
	 * we will cache the incorrect access into mmio spte.
	 */
	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
	     !is_write_protection(vcpu) && !user_fault &&
	      !is_noslot_pfn(pfn)) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page,
		 * so that the kernel can write to it when cr0.wp=0,
		 * then we should prevent the kernel from executing it
		 * if SMEP is enabled.
		 */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
			walker.pte_access &= ~ACC_EXEC_MASK;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	make_mmu_pages_available(vcpu);
	if (!force_pt_level)
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
			 level, pfn, map_writable, prefault);
	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check the return value here: if the topup fails,
	 * rmap_can_add() will make us skip the pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu);

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs(vcpu->kvm);

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
						       sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

#if PTTYPE != PTTYPE_EPT
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
#endif

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We should flush all tlbs if a spte is dropped, even though the guest is
 *   responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start can decide the mapped page is
 *   no longer used by the guest and skip the tlb flush, and the guest would
 *   then be able to access the freed pages.
 *   We therefore increase kvm->tlbs_dirty to delay the tlb flush in this
 *   case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gpa_t first_pte_gpa;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
					       sizeof(pt_element_t)))
			return 0;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			/*
			 * Update spte before increasing tlbs_dirty to make
			 * sure no tlb flush is lost after spte is zapped; see
			 * the comments in kvm_flush_remote_tlbs().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(vcpu, gpte);
		FNAME(protect_clean_gpte)(&pte_access, gpte);

		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
		      &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			/*
			 * The same as above where we are doing
			 * prefetch_invalid_gpte().
			 */
			smp_wmb();
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte(vcpu, &sp->spt[i], pte_access,
			 PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
	}

	return nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT