/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
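
/*
 * The includer drives the two compilations; mmu.c does roughly the
 * following (a sketch of the pattern, not a verbatim copy):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */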

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif
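
/*
 * For example, with PTTYPE == 64 every FNAME(x) above expands to
 * paging64_x, so FNAME(page_fault) compiles to paging64_page_fault();
 * with PTTYPE == 32 the same source yields paging32_page_fault().
 */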

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;				/* current level of the walk */
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];	/* guest table gfn at each level */
	pt_element_t *table;			/* kmap'ed guest page table page */
	pt_element_t *ptep;			/* last guest pte examined */
	pt_element_t inherited_ar;		/* access rights anded down from parents */
	gfn_t gfn;				/* gfn of the translation result */
};
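
/*
 * A successful FNAME(walk_addr)() leaves walker->table mapped with
 * kmap_atomic(); every walk must therefore be paired with
 * FNAME(release_walker)() to drop that mapping.
 */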

/*
 * Fetch a guest pte for a guest virtual address
 */
static void FNAME(walk_addr)(struct guest_walker *walker,
			     struct kvm_vcpu *vcpu, gva_t addr)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;
	pt_element_t *ptep;
	pt_element_t root;
	gfn_t table_gfn;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	walker->table = NULL;
	root = vcpu->cr3;
#if PTTYPE == 64
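	/*
	 * A PAE guest enters this 64-bit walker with only three levels;
	 * the top level entry comes from the cached pdptrs rather than
	 * from cr3.
	 */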
	if (!is_long_mode(vcpu)) {
		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
		root = *walker->ptep;
		if (!(root & PT_PRESENT_MASK))
			return;
		--walker->level;
	}
#endif
	table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	walker->table_gfn[walker->level - 1] = table_gfn;
	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
		 walker->level - 1, table_gfn);
	slot = gfn_to_memslot(vcpu->kvm, table_gfn);
	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ptep = &walker->table[index];
		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)ptep & PAGE_MASK));

		if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
			*ptep |= PT_ACCESSED_MASK;

		if (!is_present_pte(*ptep))
			break;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (*ptep & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			break;
		}

		if (walker->level != 3 || is_long_mode(vcpu))
			walker->inherited_ar &= walker->table[index];
		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
		paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
		kunmap_atomic(walker->table, KM_USER0);
		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
					    KM_USER0);
		--walker->level;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);
	}
	walker->ptep = ptep;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
}

static void FNAME(release_walker)(struct guest_walker *walker)
{
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
{
	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pte;
	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
		       guest_pte & PT_DIRTY_MASK, access_bits, gfn);
}

static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
{
	gpa_t gaddr;

	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
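	/*
	 * PSE-36: on 32-bit paging, the upper bits of a large page's
	 * base address are stored in pde bits 13 and up and must be
	 * shifted into physical address bits 32 and up.
	 */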
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
	set_pte_common(vcpu, shadow_pte, gaddr,
		       guest_pde & PT_DIRTY_MASK, access_bits, gfn);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			      struct guest_walker *walker)
{
	hpa_t shadow_addr;
	int level;
	u64 *prev_shadow_ent = NULL;
	pt_element_t *guest_ent = walker->ptep;

	if (!is_present_pte(*guest_ent))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;

		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				return shadow_ent;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL) {

			if (walker->level == PT_DIRECTORY_LEVEL) {
				if (prev_shadow_ent)
					*prev_shadow_ent |= PT_SHADOW_PS_MARK;
				FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar,
					       walker->gfn);
			} else {
				ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
				FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar,
					       walker->gfn);
			}
			return shadow_ent;
		}

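		/*
		 * A guest large page is shadowed by a full table of 4KB
		 * ptes; such a shadow page has no guest page table behind
		 * it, which kvm calls a "metaphysical" page.
		 */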
		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, shadow_ent);
		shadow_addr = shadow_page->page_hpa;
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}
}

/*
 * The guest faulted for write.  We need to
 *
 * - check write permissions
 * - update the guest pte dirty bit
 * - update our own dirty page tracking structures
 */
static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
			       u64 *shadow_ent,
			       struct guest_walker *walker,
			       gva_t addr,
			       int user,
			       int *write_pt)
{
	pt_element_t *guest_ent;
	int writable_shadow;
	gfn_t gfn;
	struct kvm_mmu_page *page;

	if (is_writeble_pte(*shadow_ent))
		return 0;

	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
	if (user) {
		/*
		 * User mode access.  Fail if it's a kernel page or a read-only
		 * page.
		 */
		if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
			return 0;
		ASSERT(*shadow_ent & PT_USER_MASK);
	} else
		/*
		 * Kernel mode access.  Fail if it's a read-only page and
		 * supervisor write protection is enabled.
		 */
		if (!writable_shadow) {
			if (is_write_protection(vcpu))
				return 0;
			*shadow_ent &= ~PT_USER_MASK;
		}

	guest_ent = walker->ptep;

	if (!is_present_pte(*guest_ent)) {
		*shadow_ent = 0;
		return 0;
	}

	gfn = walker->gfn;

	if (user) {
		/*
		 * Usermode page faults won't be for page table updates.
		 */
		while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
			pgprintk("%s: zap %lx %x\n",
				 __FUNCTION__, gfn, page->role.word);
			kvm_mmu_zap_page(vcpu, page);
		}
	} else if (kvm_mmu_lookup_page(vcpu, gfn)) {
		pgprintk("%s: found shadow page for %lx, marking ro\n",
			 __FUNCTION__, gfn);
		*write_pt = 1;
		return 0;
	}
	mark_page_dirty(vcpu->kvm, gfn);
	*shadow_ent |= PT_WRITABLE_MASK;
	*guest_ent |= PT_DIRTY_MASK;
	rmap_add(vcpu, shadow_ent);

	return 1;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			       u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int pte_present = error_code & PFERR_PRESENT_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int fixed;
	int write_pt = 0;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);

	mmu_topup_memory_caches(vcpu);

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	FNAME(walk_addr)(&walker, vcpu, addr);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!shadow_pte) {
		pgprintk("%s: not mapped\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, error_code);
		FNAME(release_walker)(&walker);
		return 0;
	}

	pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
		 shadow_pte, *shadow_pte);

	/*
	 * Update the shadow pte.
	 */
	if (write_fault)
		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
					    user_fault, &write_pt);
	else
		fixed = fix_read_pf(shadow_pte);

	pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
		 shadow_pte, *shadow_pte);

	FNAME(release_walker)(&walker);

	/*
 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte)) {
		if (may_access(*shadow_pte, write_fault, user_fault))
			return 1;
		pgprintk("%s: io work, no access\n", __FUNCTION__);
		inject_page_fault(vcpu, addr,
				  error_code | PFERR_PRESENT_MASK);
		return 0;
	}

	/*
	 * pte not present, guest page fault.
	 */
	if (pte_present && !fixed && !write_pt) {
		inject_page_fault(vcpu, addr, error_code);
		return 0;
	}

	++kvm_stat.pf_fixed;

	return write_pt;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	pt_element_t guest_pte;
	gpa_t gpa;

	FNAME(walk_addr)(&walker, vcpu, vaddr);
	guest_pte = *walker.ptep;
	FNAME(release_walker)(&walker);

	if (!is_present_pte(guest_pte))
		return UNMAPPED_GVA;

	if (walker.level == PT_DIRECTORY_LEVEL) {
		ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
		ASSERT(PTTYPE == 64 || is_pse(vcpu));

		gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
			(PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));

		if (PTTYPE == 32 && is_cpuid_PSE36())
			gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
					(32 - PT32_DIR_PSE36_SHIFT);
	} else {
		gpa = (guest_pte & PT_BASE_ADDR_MASK);
		gpa |= (vaddr & ~PAGE_MASK);
	}

	return gpa;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_MAX_FULL_LEVELS