/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

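/*
 * Atomically update a guest pte in user memory (used to set the accessed
 * and dirty bits).  Returns a negative value if the page could not be
 * pinned, 0 if the pte was updated, and 1 if the pte had already changed.
 */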
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Check if the user is doing something meaningless. */
	if (unlikely(npages != 1))
		return -EFAULT;

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

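/*
 * Derive the ACC_* permission mask granted by a guest pte.  For a leaf pte
 * (@last) with the dirty bit clear, write permission is dropped so that the
 * first write faults and the dirty bit can be set.
 */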
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte,
				   bool last)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
	if (last && !is_dirty_gpte(gpte))
		access &= ~ACC_WRITE_MASK;

#if PTTYPE == 64
	if (vcpu->arch.mmu.nx)
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}

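/*
 * Return true if this gpte terminates the walk: either a 4K leaf pte, or a
 * large-page pte at a level where the guest may use large pages.
 */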
static bool FNAME(is_last_gpte)(struct guest_walker *walker,
				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				pt_element_t gpte)
{
	if (walker->level == PT_PAGE_TABLE_LEVEL)
		return true;

	if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
	    (PTTYPE == 64 || is_pse(vcpu)))
		return true;

	if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
	    (mmu->root_level == PT64_ROOT_LEVEL))
		return true;

	return false;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
{
	pt_element_t pte;
	pt_element_t __user *uninitialized_var(ptep_user);
	gfn_t table_gfn;
	unsigned index, pt_access, uninitialized_var(pte_access);
	gpa_t pte_gpa;
	bool eperm;
	int offset;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;

	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
				     fetch_fault);
retry_walk:
	eperm = false;
	walker->level = mmu->root_level;
	pte           = mmu->get_cr3(vcpu);

#if PTTYPE == 64
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte))
			goto error;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		gfn_t real_gfn;
		unsigned long host_addr;

		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		offset    = index * sizeof(pt_element_t);
		pte_gpa   = gfn_to_gpa(table_gfn) + offset;
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      PFERR_USER_MASK|PFERR_WRITE_MASK);
		if (unlikely(real_gfn == UNMAPPED_GVA))
			goto error;
		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
			goto error;

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (unlikely(!is_present_gpte(pte)))
			goto error;

		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
					      walker->level))) {
			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		if (!check_write_user_access(vcpu, write_fault, user_fault,
					  pte))
			eperm = true;

#if PTTYPE == 64
		if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
			eperm = true;
#endif

		if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
			int ret;
			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
						  pte, pte|PT_ACCESSED_MASK);
			if (unlikely(ret < 0))
				goto error;
			else if (ret)
				goto retry_walk;

			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
		}

		walker->ptes[walker->level - 1] = pte;

		if (FNAME(is_last_gpte)(walker, vcpu, mmu, pte)) {
			int lvl = walker->level;
			gpa_t real_gpa;
			gfn_t gfn;
			u32 ac;

			/* check if the kernel is fetching from a user page */
			if (unlikely(pte_access & PT_USER_MASK) &&
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
				if (fetch_fault && !user_fault)
					eperm = true;

			gfn = gpte_to_gfn_lvl(pte, lvl);
			gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;

			if (PTTYPE == 32 &&
			    walker->level == PT_DIRECTORY_LEVEL &&
			    is_cpuid_PSE36())
				gfn += pse36_gfn_delta(pte);

			ac = write_fault | fetch_fault | user_fault;

			real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
						      ac);
			if (real_gpa == UNMAPPED_GVA)
				return 0;

			walker->gfn = real_gpa >> PAGE_SHIFT;

			break;
		}

		pt_access &= FNAME(gpte_access)(vcpu, pte, false);
		--walker->level;
	}

	if (unlikely(eperm)) {
		errcode |= PFERR_PRESENT_MASK;
		goto error;
	}

	if (write_fault && unlikely(!is_dirty_gpte(pte))) {
		int ret;

		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
					  pte, pte|PT_DIRTY_MASK);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;

		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	pte_access = pt_access & FNAME(gpte_access)(vcpu, pte, true);
	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
}

static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}

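/*
 * Check whether a guest pte is unusable for (pre)fetching: reserved bits
 * set, not present, or not yet accessed.  If so, drop the shadow pte and
 * return true.
 */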
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *spte,
				    pt_element_t gpte)
{
	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	if (!is_present_gpte(gpte))
		goto no_present;

	if (!(gpte & PT_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}

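/*
 * Refresh a single shadow pte from an updated guest pte: validate the new
 * gpte, resolve its pfn, and install the spte.
 */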
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;

	gpte = *(const pt_element_t *)pte;
	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
	if (mmu_invalid_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}

	/*
	 * We call mmu_set_spte() with host_writable = true because
	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
	 */
	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
		     NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true, true);
}

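/*
 * Re-read the guest pte used at @level of the walk and report whether it
 * changed (or could not be read) since the walk.  At the lowest level the
 * whole prefetch batch around the pte is read in one go.
 */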
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}

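/*
 * Speculatively install shadow ptes for the guest ptes surrounding the
 * faulting entry (already read into gw->prefetch_ptes), so that neighbouring
 * accesses do not fault again.
 */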
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		pt_element_t gpte;
		unsigned pte_access;
		gfn_t gfn;
		pfn_t pfn;

		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		gpte = gptep[i];

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
			continue;

		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte,
								  true);
		gfn = gpte_to_gfn(gpte);
		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
				      pte_access & ACC_WRITE_MASK);
		if (mmu_invalid_pfn(pfn)) {
			kvm_release_pfn_clean(pfn);
			break;
		}

		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
			     pfn, true, true);
	}
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *emulate, pfn_t pfn, bool map_writable,
			 bool prefault)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *sp = NULL;
	int top_level;
	unsigned direct_access;
	struct kvm_shadow_walk_iterator it;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	}

	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
		     user_fault, write_fault, emulate, it.level,
		     gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return it.sptep;

out_gpte_changed:
	if (sp)
		kvm_mmu_put_page(sp, it.sptep);
	kvm_release_pfn_clean(pfn);
	return NULL;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int emulate = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	int force_pt_level;
	unsigned long mmu_seq;
	bool map_writable;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	if (unlikely(error_code & PFERR_RSVD_MASK))
		return handle_mmio_page_fault(vcpu, addr, error_code,
					      mmu_is_nested(vcpu));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault) {
			inject_page_fault(vcpu, &walker.fault);
			/* reset fork detector */
			vcpu->arch.last_pt_write_count = 0;
		}
		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL)
		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
	else
		force_pt_level = 1;
	if (!force_pt_level) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return 0;

	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
				walker.gfn, pfn, walker.pte_access, &r))
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;

	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	kvm_mmu_free_some_pages(vcpu);
	if (!force_pt_level)
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &emulate, pfn, map_writable, prefault);
	(void)sptep;
	pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
		 sptep, *sptep, emulate);

	if (!emulate)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return emulate;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

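/*
 * Emulate a guest INVLPG: locate the leaf shadow pte mapping @gva and, if
 * its page is unsync, drop it and flush remote TLBs if needed, then let
 * kvm_mmu_pte_write() refresh the entry from the guest pte.
 */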
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	vcpu_clear_mmio_info(vcpu, gva);

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			int offset, shift;

			if (!sp->unsync)
				break;

			shift = PAGE_SHIFT -
				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
			offset = sp->role.quadrant << shift;

			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				drop_spte(vcpu->kvm, sptep);
				need_flush = 1;
			} else if (is_mmio_spte(*sptep))
				mmu_spte_clear_no_track(sptep);

			break;
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);

	atomic_inc(&vcpu->kvm->arch.invlpg_counter);

	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;

	if (mmu_topup_memory_caches(vcpu))
		return;
	kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}

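/*
 * Translate a guest virtual address to a guest physical address by walking
 * the guest page tables; on failure the fault is reported via @exception.
 */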
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

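/*
 * As above, but walks the nested guest's page tables via walk_addr_nested().
 */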
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We should flush all tlbs if a spte is dropped even though the guest is
 *   responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start see that the mapping page is no
 *   longer used by the guest and skip the tlb flush, so the guest can still
 *   access the freed pages through stale tlb entries.
 *   We increase kvm->tlbs_dirty to delay the tlb flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;
	bool host_writable;
	gpa_t first_pte_gpa;

	offset = nr_present = 0;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(vcpu, gpte, true);

		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
	}

	return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG