/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

/*
 * When setting this variable to true it enables Two-Dimensional-Paging
 * where the hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1. it walks guest-physical to host-physical
 * If the hardware supports that we don't need to do shadow paging.
 */
static bool tdp_enabled = false;

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

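/*
 * Make sure the per-vcpu object caches (pte chains, rmap descriptors,
 * shadow pages and page headers) are filled, so that later MMU work that
 * runs under mmu_lock never has to allocate memory itself.
 */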
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count += 1;
	WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count -= 1;
	WARN_ON(*write_count < 0);
}

static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int *largepage_idx;

	if (slot) {
		largepage_idx = slot_largepage_idx(gfn, slot);
		return *largepage_idx;
	}

	return 1;
}

static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return 0;

	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma))
		return 1;

	return 0;
}

static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct kvm_memory_slot *slot;

	if (has_wrprotected_page(vcpu->kvm, large_gfn))
		return 0;

	if (!host_largepage_backed(vcpu->kvm, large_gfn))
		return 0;

	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
		return 0;

	return 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function is called.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
	struct kvm_memory_slot *slot;
	unsigned long idx;

	slot = gfn_to_memslot(kvm, gfn);
	if (!lpage)
		return &slot->rmap[gfn - slot->base_gfn];

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);

	return &slot->lpage_info[idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	struct page *page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	mark_page_accessed(page);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(page);
	else
		kvm_release_page_clean(page);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

573
static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
574 575
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

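/*
 * Write protect all shadow ptes that map @gfn: clear the writable bit on
 * every spte in the 4K rmap chain, drop writable large-page mappings, and
 * flush remote TLBs if any spte was changed.
 */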
static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
603
	unsigned long *rmapp;
604
	u64 *spte;
605
	int write_protected = 0;
606

607
	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn, 0);
609

610 611
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
612 613 614
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
615
		if (is_writeble_pte(*spte)) {
616
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
617 618
			write_protected = 1;
		}
619
		spte = rmap_next(kvm, rmapp, spte);
620
	}
	/* check for huge page mappings */
	rmapp = gfn_to_rmap(kvm, gfn, 1);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
		if (is_writeble_pte(*spte)) {
			rmap_remove(kvm, spte);
			--kvm->stat.lpages;
			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}

638 639
	if (write_protected)
		kvm_flush_remote_tlbs(kvm);

	account_shadowed(kvm, gfn);
642 643
}

644
#ifdef MMU_DEBUG
645
static int is_empty_shadow_page(u64 *spt)
{
647 648 649
	u64 *pos;
	u64 *end;

650
	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
651
		if (*pos != shadow_trap_nonpresent_pte) {
652 653
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
655
		}
	return 1;
}
658
#endif

660
static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
661
{
662 663 664 665 666
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
667
	++kvm->arch.n_free_mmu_pages;
668 669
}

670 671
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
672
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
673 674
}

675 676
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
678
	struct kvm_mmu_page *sp;

680 681 682
	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
683
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
684
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
685 686 687 688
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
689
	--vcpu->kvm->arch.n_free_mmu_pages;
690
	return sp;
}

693
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
694
				    struct kvm_mmu_page *sp, u64 *parent_pte)
695 696 697 698 699 700 701
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
702 703
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;
704 705

		if (!old) {
706
			sp->parent_pte = parent_pte;
707 708
			return;
		}
709
		sp->multimapped = 1;
710
		pte_chain = mmu_alloc_pte_chain(vcpu);
711 712
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
713 714
		pte_chain->parent_ptes[0] = old;
	}
715
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
716 717 718 719 720 721 722 723
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
724
	pte_chain = mmu_alloc_pte_chain(vcpu);
725
	BUG_ON(!pte_chain);
726
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
727 728 729
	pte_chain->parent_ptes[0] = parent_pte;
}

730
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
731 732 733 734 735 736
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

737 738 739
	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
740 741
		return;
	}
742
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
743 744 745 746 747
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
748 749
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
750 751 752 753 754
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
755 756
			if (i == 0) {
				hlist_del(&pte_chain->link);
757
				mmu_free_pte_chain(pte_chain);
758 759 760
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
761 762
				}
			}
763 764 765 766 767
			return;
		}
	BUG();
}

768
static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
769 770 771
{
	unsigned index;
	struct hlist_head *bucket;
772
	struct kvm_mmu_page *sp;
773 774 775
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
776
	index = kvm_page_table_hashfn(gfn);
777
	bucket = &kvm->arch.mmu_page_hash[index];
778
	hlist_for_each_entry(sp, node, bucket, hash_link)
779 780
		if (sp->gfn == gfn && !sp->role.metaphysical
		    && !sp->role.invalid) {
781
			pgprintk("%s: found role %x\n",
782 783
				 __FUNCTION__, sp->role.word);
			return sp;
		}
	return NULL;
}

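/*
 * Look up the shadow page for (gfn, role) in the hash table and attach
 * @parent_pte to it; if none exists, allocate a fresh shadow page, hash it
 * and (for non-metaphysical pages) write protect the guest page table.
 */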
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
793
					     unsigned access,
794
					     u64 *parent_pte)
795 796 797 798 799
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
800
	struct kvm_mmu_page *sp;
801 802 803
	struct hlist_node *node;

	role.word = 0;
804
	role.glevels = vcpu->arch.mmu.root_level;
805 806
	role.level = level;
	role.metaphysical = metaphysical;
807
	role.access = access;
808
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
809 810 811 812 813 814
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
815
	index = kvm_page_table_hashfn(gfn);
816
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
817 818 819
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
820
			pgprintk("%s: found\n", __FUNCTION__);
821
			return sp;
822
		}
	++vcpu->kvm->stat.mmu_cache_miss;
824 825 826
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
827
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
828 829 830
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
831
	vcpu->arch.mmu.prefetch_page(vcpu, sp);
832
	if (!metaphysical)
833
		rmap_write_protect(vcpu->kvm, gfn);
834
	return sp;
835 836
}

837
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
838
					 struct kvm_mmu_page *sp)
839
{
840 841 842 843
	unsigned i;
	u64 *pt;
	u64 ent;

844
	pt = sp->spt;
845

846
	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
847
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
848
			if (is_shadow_present_pte(pt[i]))
849
				rmap_remove(kvm, &pt[i]);
850
			pt[i] = shadow_trap_nonpresent_pte;
851
		}
852
		kvm_flush_remote_tlbs(kvm);
853 854 855 856 857 858
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_large_pte(ent)) {
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
				--kvm->stat.lpages;
				rmap_remove(kvm, &pt[i]);
			}
		}
869
		pt[i] = shadow_trap_nonpresent_pte;
870
	}
871
	kvm_flush_remote_tlbs(kvm);
872 873
}

874
static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
875
{
876
	mmu_page_remove_parent_pte(sp, parent_pte);
877 878
}

879 880 881 882 883 884
static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
885
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
886 887
}

888
static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
889 890 891
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
893 894 895
	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
896 897 898
		else {
			struct kvm_pte_chain *chain;

899
			chain = container_of(sp->parent_ptes.first,
900 901 902
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
903
		BUG_ON(!parent_pte);
904
		kvm_mmu_put_page(sp, parent_pte);
905
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
906
	}
907 908
	kvm_mmu_page_unlink_children(kvm, sp);
	if (!sp->root_count) {
		if (!sp->role.metaphysical)
			unaccount_shadowed(kvm, sp->gfn);
911 912
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
913
	} else {
914
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
915 916 917
		sp->role.invalid = 1;
		kvm_reload_remote_mmus(kvm);
	}
918
	kvm_mmu_reset_last_pte_updated(kvm);
919 920
}

/*
 * Changing the number of mmu pages allocated to the vm
 * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the number
	 * of active pages, we must free some mmu pages before we can
	 * change the value.
	 */

933
	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
934
	    kvm_nr_mmu_pages) {
935 936
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;
937 938 939 940

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

941
			page = container_of(kvm->arch.active_mmu_pages.prev,
942 943 944 945
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
946
		kvm->arch.n_free_mmu_pages = 0;
947 948
	}
	else
949 950
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->arch.n_alloc_mmu_pages;
951

952
	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
953 954
}

955
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
956 957 958
{
	unsigned index;
	struct hlist_head *bucket;
959
	struct kvm_mmu_page *sp;
960 961 962 963 964
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
965
	index = kvm_page_table_hashfn(gfn);
966
	bucket = &kvm->arch.mmu_page_hash[index];
967 968
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
969
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
970 971
				 sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
972 973 974
			r = 1;
		}
	return r;
975 976
}

977
static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
978
{
979
	struct kvm_mmu_page *sp;
980

981 982 983
	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
984 985 986
	}
}

987
static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
989
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
990
	struct kvm_mmu_page *sp = page_header(__pa(pte));

992
	__set_bit(slot, &sp->slot_bitmap);
}

995 996
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
997 998
	struct page *page;

999
	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1000 1001 1002

	if (gpa == UNMAPPED_GVA)
		return NULL;
1003 1004 1005 1006 1007 1008

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	return page;
1009 1010
}

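
/*
 * Install a shadow pte: build the spte from the guest access bits, re-apply
 * write protection if the gfn is itself shadowed as a page table, and keep
 * the rmap, dirty log and large-page accounting up to date.
 */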
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int largepage, gfn_t gfn,
			 struct page *page)
1016 1017
{
	u64 spte;
1018
	int was_rmapped = 0;
1019
	int was_writeble = is_writeble_pte(*shadow_pte);
1020
	hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1021

1022
	pgprintk("%s: spte %llx access %x write_fault %d"
1023
		 " user_fault %d gfn %lx\n",
1024
		 __FUNCTION__, *shadow_pte, pt_access,
1025 1026
		 write_fault, user_fault, gfn);

1027
	if (is_rmap_pte(*shadow_pte)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (largepage && !is_large_pte(*shadow_pte)) {
			struct kvm_mmu_page *child;
			u64 pte = *shadow_pte;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, shadow_pte);
		} else if (host_pfn != page_to_pfn(page)) {
1039 1040 1041
			pgprintk("hfn old %lx new %lx\n",
				 host_pfn, page_to_pfn(page));
			rmap_remove(vcpu->kvm, shadow_pte);
		} else {
			if (largepage)
				was_rmapped = is_large_pte(*shadow_pte);
			else
				was_rmapped = 1;
1047 1048 1049
		}
	}


	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (!(pte_access & ACC_EXEC_MASK))
		spte |= PT64_NX_MASK;

	spte |= PT_PRESENT_MASK;
	if (pte_access & ACC_USER_MASK)
		spte |= PT_USER_MASK;
	if (largepage)
		spte |= PT_PAGE_SIZE_MASK;

	spte |= page_to_phys(page);

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow ||
		   (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
1104
	set_shadow_pte(shadow_pte, spte);
	if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
	    && (spte & PT_PRESENT_MASK))
		++vcpu->kvm->stat.lpages;

1109 1110
	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn, largepage);
1112 1113
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_page_clean(page);
1114 1115 1116 1117 1118
	} else {
		if (was_writeble)
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
1119 1120
	}
	if (!ptwrite || !*ptwrite)
1121
		vcpu->arch.last_pte_updated = shadow_pte;
1122 1123
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

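/*
 * Walk the shadow page table for a direct (non-paging or TDP) mapping,
 * allocating intermediate shadow pages as needed, and install the final
 * pte; with @largepage set, a 2MB pte is installed at the directory level.
 */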
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			   int largepage, gfn_t gfn, struct page *page,
			   int level)
{
1132
	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
1133
	int pt_write = 0;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
1143
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, 0, gfn, page);
			return pt_write;
		}

		if (largepage && level == 2) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				    0, write, 1, &pt_write, 1, gfn, page);
1151
			return pt_write;
		}

1154
		if (table[index] == shadow_trap_nonpresent_pte) {
1155
			struct kvm_mmu_page *new_table;
1156
			gfn_t pseudo_gfn;

1158 1159 1160 1161
			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
1162
						     1, ACC_ALL, &table[index]);
1163
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
1165
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

1169
			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
1170
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

1176 1177 1178
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int largepage = 0;
1180

1181 1182
	struct page *page;

1183 1184
	down_read(&vcpu->kvm->slots_lock);

1185
	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}

1191
	page = gfn_to_page(vcpu->kvm, gfn);
1192
	up_read(&current->mm->mmap_sem);
1193

1194 1195 1196 1197 1198 1199 1200
	/* mmio */
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		up_read(&vcpu->kvm->slots_lock);
		return 1;
	}

1201
	spin_lock(&vcpu->kvm->mmu_lock);
1202
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, largepage, gfn, page,
			 PT32E_ROOT_LEVEL);
1205 1206
	spin_unlock(&vcpu->kvm->mmu_lock);

1207
	up_read(&vcpu->kvm->slots_lock);
1208

1209 1210 1211 1212
	return r;
}


1213 1214 1215 1216 1217 1218 1219 1220 1221
static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

1222 1223 1224
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
1225
	struct kvm_mmu_page *sp;
1226

1227
	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
1229
	spin_lock(&vcpu->kvm->mmu_lock);
1230
#ifdef CONFIG_X86_64
1231 1232
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
1233

1234 1235
		sp = page_header(root);
		--sp->root_count;
1236 1237
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_zap_page(vcpu->kvm, sp);
1238
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1239
		spin_unlock(&vcpu->kvm->mmu_lock);
1240 1241 1242 1243
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
1244
		hpa_t root = vcpu->arch.mmu.pae_root[i];
1245

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
1248 1249
			sp = page_header(root);
			--sp->root_count;
1250 1251
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_zap_page(vcpu->kvm, sp);
		}
1253
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1254
	}
1255
	spin_unlock(&vcpu->kvm->mmu_lock);
1256
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1257 1258 1259 1260 1261
}

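/*
 * Allocate the shadow roots: a single PML4-level root when the shadow
 * paging level is 64-bit, otherwise four PAE page-directory roots, and
 * record them in mmu.root_hpa / mmu.pae_root.
 */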
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
1262
	gfn_t root_gfn;
1263
	struct kvm_mmu_page *sp;
1264
	int metaphysical = 0;
1265

1266
	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1267 1268

#ifdef CONFIG_X86_64
1269 1270
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
1271 1272

		ASSERT(!VALID_PAGE(root));
1273 1274
		if (tdp_enabled)
			metaphysical = 1;
1275
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1276 1277
				      PT64_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
1278 1279
		root = __pa(sp->spt);
		++sp->root_count;
1280
		vcpu->arch.mmu.root_hpa = root;
1281 1282 1283
		return;
	}
#endif
1284 1285 1286
	metaphysical = !is_paging(vcpu);
	if (tdp_enabled)
		metaphysical = 1;
1287
	for (i = 0; i < 4; ++i) {
1288
		hpa_t root = vcpu->arch.mmu.pae_root[i];
1289 1290

		ASSERT(!VALID_PAGE(root));
1291 1292 1293
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
1296 1297
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
1298
			root_gfn = 0;
1299
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
1300
				      PT32_ROOT_LEVEL, metaphysical,
1301
				      ACC_ALL, NULL);
1302 1303
		root = __pa(sp->spt);
		++sp->root_count;
1304
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
1305
	}
1306
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
1307 1308
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
1317
	gfn_t gfn;
1318
	int r;

1320
	pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
1321 1322 1323
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;
1324

	ASSERT(vcpu);
1326
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

1328
	gfn = gva >> PAGE_SHIFT;

1330 1331
	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

1334 1335 1336 1337 1338
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
				u32 error_code)
{
	struct page *page;
	int r;
	int largepage = 0;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}
	page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		up_read(&current->mm->mmap_sem);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 largepage, gfn, page, TDP_ROOT_LEVEL);
1364 1365 1366 1367 1368 1369
	spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&current->mm->mmap_sem);

	return r;
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
1372
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
1377
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
1383
	context->prefetch_page = nonpaging_prefetch_page;
1384
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

1390
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
1393
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
1398
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
1399
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
1406
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

1422
static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
1424
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
1430
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
1432 1433
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

1438 1439 1440 1441 1442
static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
1445
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
1451
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
1460
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->shadow_root_level = TDP_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
1494
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
1517 1518 1519
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
1528
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
1532 1533
	int r;

1534
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
1537
	spin_lock(&vcpu->kvm->mmu_lock);
1538
	kvm_mmu_free_some_pages(vcpu);
	mmu_alloc_roots(vcpu);
1540
	spin_unlock(&vcpu->kvm->mmu_lock);
1541
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
1543 1544
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

1553
static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1554
				  struct kvm_mmu_page *sp,
1555 1556 1557 1558 1559 1560
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
1561
	if (is_shadow_present_pte(pte)) {
		if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
		    is_large_pte(pte))
1564
			rmap_remove(vcpu->kvm, spte);
1565 1566
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
1567
			mmu_page_remove_parent_pte(child, spte);
1568 1569
		}
	}
1570
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
1573 1574
}

1575
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1576
				  struct kvm_mmu_page *sp,
1577
				  u64 *spte,
1578
				  const void *new)
1579
{
	if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
	    && !vcpu->arch.update_pte.largepage) {
		++vcpu->kvm->stat.mmu_pde_zapped;
1583
		return;
	}
1585

	++vcpu->kvm->stat.mmu_pte_updated;
1587
	if (sp->role.glevels == PT32_ROOT_LEVEL)
1588
		paging32_update_pte(vcpu, sp, spte, new);
1589
	else
1590
		paging64_update_pte(vcpu, sp, spte, new);
1591 1592
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

1614 1615
static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
1616
	u64 *spte = vcpu->arch.last_pte_updated;
1617 1618 1619 1620

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}

1621 1622 1623 1624 1625 1626
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
1627
	struct page *page;
1628

	vcpu->arch.update_pte.largepage = 0;

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_pte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1657

	down_read(&current->mm->mmap_sem);
	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		vcpu->arch.update_pte.largepage = 1;
	}
1663
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);
1665

1666 1667 1668 1669
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return;
	}
1670
	vcpu->arch.update_pte.gfn = gfn;
1671
	vcpu->arch.update_pte.page = page;
1672 1673
}

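/*
 * Called for emulated writes to guest page tables that are shadowed: zap
 * or update the affected sptes, and unshadow pages that see misaligned or
 * flooded writes, since they are probably no longer used as page tables.
 */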
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1675
		       const u8 *new, int bytes)
1676
{
1677
	gfn_t gfn = gpa >> PAGE_SHIFT;
1678
	struct kvm_mmu_page *sp;
1679
	struct hlist_node *node, *n;
1680 1681
	struct hlist_head *bucket;
	unsigned index;
1682
	u64 entry, gentry;
1683 1684
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
1685
	unsigned pte_size;
1686
	unsigned page_offset;
1687
	unsigned misaligned;
1688
	unsigned quadrant;
1689
	int level;
1690
	int flooded = 0;
1691
	int npte;
1692
	int r;
1693

1694
	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
1695
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
1696
	spin_lock(&vcpu->kvm->mmu_lock);
1697
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
1699
	kvm_mmu_audit(vcpu, "pre pte write");
1700
	if (gfn == vcpu->arch.last_pt_write_gfn
1701
	    && !last_updated_pte_accessed(vcpu)) {
1702 1703
		++vcpu->arch.last_pt_write_count;
		if (vcpu->arch.last_pt_write_count >= 3)
1704 1705
			flooded = 1;
	} else {
1706 1707 1708
		vcpu->arch.last_pt_write_gfn = gfn;
		vcpu->arch.last_pt_write_count = 1;
		vcpu->arch.last_pte_updated = NULL;
1709
	}
1710
	index = kvm_page_table_hashfn(gfn);
1711
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1712 1713
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
		if (sp->gfn != gfn || sp->role.metaphysical)
1714
			continue;
1715
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1716
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1717
		misaligned |= bytes < 4;
1718
		if (misaligned || flooded) {
1719 1720 1721 1722
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
1723 1724 1725 1726 1727
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
1728 1729
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1730 1731
				 gpa, bytes, sp->role.word);
			kvm_mmu_zap_page(vcpu->kvm, sp);
			++vcpu->kvm->stat.mmu_flooded;
1733 1734
			continue;
		}
1735
		page_offset = offset;
1736
		level = sp->role.level;
1737
		npte = 1;
1738
		if (sp->role.glevels == PT32_ROOT_LEVEL) {
1739 1740 1741 1742 1743 1744 1745
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
1746
				page_offset &= ~7; /* kill rounding error */
1747 1748 1749
				page_offset <<= 1;
				npte = 2;
			}
1750
			quadrant = page_offset >> PAGE_SHIFT;
1751
			page_offset &= ~PAGE_MASK;
1752
			if (quadrant != sp->role.quadrant)
1753
				continue;
1754
		}
1755
		spte = &sp->spt[page_offset / sizeof(*spte)];
		if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
			gentry = 0;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gpa & ~(u64)(pte_size - 1),
						  &gentry, pte_size);
			new = (const void *)&gentry;
			if (r < 0)
				new = NULL;
		}
1765
		while (npte--) {
1766
			entry = *spte;
1767
			mmu_pte_write_zap_pte(vcpu, sp, spte);
1768 1769
			if (new)
				mmu_pte_write_new_pte(vcpu, sp, spte, new);
1770
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
1771
			++spte;
1772 1773
		}
	}
1774
	kvm_mmu_audit(vcpu, "post pte write");
1775
	spin_unlock(&vcpu->kvm->mmu_lock);
1776 1777 1778 1779
	if (vcpu->arch.update_pte.page) {
		kvm_release_page_clean(vcpu->arch.update_pte.page);
		vcpu->arch.update_pte.page = NULL;
	}
1780 1781
}

1782 1783
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
1784 1785
	gpa_t gpa;
	int r;
1786

1787
	down_read(&vcpu->kvm->slots_lock);
1788
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1789
	up_read(&vcpu->kvm->slots_lock);
1790

1791
	spin_lock(&vcpu->kvm->mmu_lock);
1792
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1793
	spin_unlock(&vcpu->kvm->mmu_lock);
1794
	return r;
1795 1796
}

1797
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
1799
	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
1800
		struct kvm_mmu_page *sp;

1802
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
1803 1804
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
		++vcpu->kvm->stat.mmu_recycled;
	}
}

1809 1810 1811 1812 1813
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

1814
	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
1815 1816 1817 1818 1819 1820 1821 1822
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

1823 1824 1825 1826
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

1846 1847 1848 1849 1850 1851
void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
1854
	struct kvm_mmu_page *sp;

1856 1857
	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
1858 1859
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
1860
	}
1861
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
1866
	struct page *page;
	int i;

	ASSERT(vcpu);

1871 1872 1873
	if (vcpu->kvm->arch.n_requested_mmu_pages)
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_requested_mmu_pages;
1874
	else
1875 1876
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_alloc_mmu_pages;
1877 1878 1879 1880 1881 1882 1883 1884
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
1885
	vcpu->arch.mmu.pae_root = page_address(page);
1886
	for (i = 0; i < 4; ++i)
1887
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1888

A
Avi Kivity 已提交
1889 1890 1891 1892 1893 1894 1895
	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

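/*
 * Per-vcpu MMU lifecycle: kvm_mmu_create() allocates the backing pages,
 * kvm_mmu_setup() initializes the paging context, and kvm_mmu_destroy()
 * undoes both and drains the per-vcpu memory caches.
 */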
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

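/*
 * Clear the writable bit in every shadow pte belonging to the given
 * memory slot, so future guest writes fault and can be tracked
 * (presumably for dirty-page logging).
 */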
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}

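/*
 * Drop every shadow page in the VM under mmu_lock and force all vcpus
 * to flush their TLBs afterwards.
 */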
void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;

	spin_lock(&kvm->mmu_lock);
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, sp);
	spin_unlock(&kvm->mmu_lock);

	kvm_flush_remote_tlbs(kvm);
}

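/*
 * Module-wide slab caches for pte chains, rmap descriptors and shadow
 * page headers.  Init creates all three (backing out on failure); exit
 * destroys whichever were created.
 */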
void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

/*
 * Calculate mmu pages needed for kvm.
 */
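/*
 * Example (assuming KVM_PERMILLE_MMU_PAGES is 20): a guest with 1GB of
 * memory has 262144 4K pages, so the limit works out to
 * 262144 * 20 / 1000 = 5242 shadow pages, subject to the
 * KVM_MIN_ALLOC_MMU_PAGES floor below.
 */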
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int  nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

#ifdef AUDIT

static const char *audit_msg;

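/*
 * On x86_64, sign-extend bit 47 so the audited virtual address matches
 * the canonical form the hardware uses.
 */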
static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

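/*
 * Recursively walk one shadow page table, checking that every present
 * leaf pte points at the host page backing the guest address it maps,
 * and warning about "notrap" ptes that appear where they should not.
 */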
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->arch.mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			struct page *page = gpa_to_page(vcpu, gpa);
			hpa_t hpa = page_to_phys(page);

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_page_clean(page);

		}
	}
}

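/*
 * Audit the whole guest address space, starting from the 4-level root
 * or from each present PAE root, as appropriate.
 */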
static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}

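/*
 * Consistency check between the rmap and the shadow page tables:
 * count_rmaps() walks every memslot's reverse-map entries while
 * count_writable_mappings() counts writable last-level sptes, and
 * audit_rmap() complains if the two totals disagree.
 */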
static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

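/*
 * A shadowed guest page table is supposed to be write-protected; warn
 * if any non-metaphysical shadow page still has writable mappings in
 * its gfn's rmap.
 */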
static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, sp->gfn,
			       sp->role.word);
	}
}

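/*
 * Run all audit passes with debug printing temporarily silenced, tagging
 * any reports with the caller-supplied message.
 */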
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif