paging_tmpl.h
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
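/*
 * A minimal sketch of how this template is typically instantiated; the
 * including file is not part of this header, so treat the include site
 * below as an illustration rather than a quote of the real source:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * Each inclusion stamps out a complete walker/fetch/page-fault family via
 * FNAME(), e.g. paging64_page_fault() and paging32_page_fault().
 */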

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
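	/*
	 * Shadow page tables always use 64-bit entries (PAE or long mode),
	 * so shadow indexing stays PT64 even when guest ptes are 32-bit.
	 */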
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
#else
	#error Invalid PTTYPE value
#endif

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;			/* current level of the walk */
	gfn_t table_gfn;		/* gfn of the guest page table being walked */
	pt_element_t *table;		/* kmap'ed pointer to that page table */
	pt_element_t *ptep;		/* guest pte the walk stopped at */
	pt_element_t inherited_ar;	/* access rights ANDed in from parent levels */
};

/*
 * Fetch a guest pte for a guest virtual address
 */
static void FNAME(walk_addr)(struct guest_walker *walker,
			     struct kvm_vcpu *vcpu, gva_t addr)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;
	pt_element_t *ptep;

	walker->level = vcpu->mmu.root_level;
	walker->table_gfn = (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	slot = gfn_to_memslot(vcpu->kvm, walker->table_gfn);
	hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

	walker->table = (pt_element_t *)( (unsigned long)walker->table |
		(unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) );
	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
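		/*
		 * Walk down the guest page tables, setting accessed bits and
		 * accumulating access rights, until we reach a leaf: a 4K
		 * pte, a large-page pde, or a not-present entry.
		 */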
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ptep = &walker->table[index];
		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)ptep & PAGE_MASK));

		/* Don't set accessed bit on PAE PDPTRs */
		if (vcpu->mmu.root_level != 3 || walker->level != 3)
			if ((*ptep & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
			    == PT_PRESENT_MASK)
				*ptep |= PT_ACCESSED_MASK;

		if (!is_present_pte(*ptep) ||
		    walker->level == PT_PAGE_TABLE_LEVEL ||
		    (walker->level == PT_DIRECTORY_LEVEL &&
		     (*ptep & PT_PAGE_SIZE_MASK) &&
		     (PTTYPE == 64 || is_pse(vcpu))))
			break;

		if (walker->level != 3 || is_long_mode(vcpu))
			walker->inherited_ar &= walker->table[index];
		walker->table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
		paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
		kunmap_atomic(walker->table, KM_USER0);
		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
					    KM_USER0);
		--walker->level;
	}
	walker->ptep = ptep;
}

static void FNAME(release_walker)(struct guest_walker *walker)
{
	kunmap_atomic(walker->table, KM_USER0);
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
			   u64 *shadow_pte, u64 access_bits)
{
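	/*
	 * Build a leaf shadow pte from a guest pte: copy the bits selected
	 * by PT_PTE_COPY_MASK and narrow the access rights to those the
	 * guest pte actually grants.
	 */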
	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pte;
	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
		       guest_pte & PT_DIRTY_MASK, access_bits);
}

static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
			   u64 *shadow_pte, u64 access_bits,
			   int index)
{
	gpa_t gaddr;

	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pde;
	gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
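	/*
	 * With PSE-36, a 32-bit pde carries physical address bits above 31
	 * starting at bit PT32_DIR_PSE36_SHIFT; shift them into place so a
	 * 4MB guest page can map memory above 4GB.
	 */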
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
	set_pte_common(vcpu, shadow_pte, gaddr,
		       guest_pde & PT_DIRTY_MASK, access_bits);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			      struct guest_walker *walker)
{
	hpa_t shadow_addr;
	int level;
	u64 *prev_shadow_ent = NULL;
	pt_element_t *guest_ent = walker->ptep;

	if (!is_present_pte(*guest_ent))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;

	for (; ; level--) {
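		/*
		 * Descend the shadow page table from the root, following
		 * existing entries and allocating shadow page tables for
		 * missing levels, until a leaf shadow pte can be installed
		 * from the guest entry found by walk_addr().
		 */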
		u32 index = SHADOW_PT_INDEX(addr, level);
		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		u64 shadow_pte;

		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				return shadow_ent;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL) {

			if (walker->level == PT_DIRECTORY_LEVEL) {
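				/*
				 * The guest maps this range with a large pde
				 * while the shadow uses 4K ptes: mark the
				 * parent shadow entry as shadowing a large
				 * page and derive this pte from the guest pde.
				 */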
				if (prev_shadow_ent)
					*prev_shadow_ent |= PT_SHADOW_PS_MARK;
				FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar,
				          PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
			} else {
				ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
				FNAME(set_pte)(vcpu, *guest_ent, shadow_ent, walker->inherited_ar);
			}
			return shadow_ent;
		}

		shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
		if (!VALID_PAGE(shadow_addr))
			return ERR_PTR(-ENOMEM);
		shadow_pte = shadow_addr | PT_PRESENT_MASK;
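		/*
		 * As in the guest walk above, avoid setting the accessed,
		 * writable and user bits on what may be a PAE PDPTE
		 * (level 3 with a non-long-mode guest).
		 */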
		if (vcpu->mmu.root_level > 3 || level != 3)
			shadow_pte |= PT_ACCESSED_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}
}

/*
 * The guest faulted for write.  We need to
 *
 * - check write permissions
 * - update the guest pte dirty bit
 * - update our own dirty page tracking structures
 */
static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
			       u64 *shadow_ent,
			       struct guest_walker *walker,
			       gva_t addr,
			       int user)
{
	pt_element_t *guest_ent;
	int writable_shadow;
	gfn_t gfn;

	if (is_writeble_pte(*shadow_ent))
		return 0;

	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
	if (user) {
		/*
		 * User mode access.  Fail if it's a kernel page or a read-only
		 * page.
		 */
		if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
			return 0;
		ASSERT(*shadow_ent & PT_USER_MASK);
	} else
		/*
		 * Kernel mode access.  Fail if it's a read-only page and
		 * supervisor write protection is enabled.
		 */
		if (!writable_shadow) {
			if (is_write_protection(vcpu))
				return 0;
			*shadow_ent &= ~PT_USER_MASK;
		}

	guest_ent = walker->ptep;

	if (!is_present_pte(*guest_ent)) {
		*shadow_ent = 0;
		return 0;
	}

	gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	mark_page_dirty(vcpu->kvm, gfn);
	*shadow_ent |= PT_WRITABLE_MASK;
	*guest_ent |= PT_DIRTY_MASK;
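	/*
	 * Record this shadow pte in the reverse map so it can be found
	 * again later, e.g. when the page must be write-protected.
	 */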
	rmap_add(vcpu->kvm, shadow_ent);

	return 1;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			       u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int pte_present = error_code & PFERR_PRESENT_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int fixed;

	/*
	 * Look up the shadow pte for the faulting address.  If fetching it
	 * fails for lack of memory, flush the shadow page tables and retry
	 * the walk.
	 */
	for (;;) {
		FNAME(walk_addr)(&walker, vcpu, addr);
		shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
		if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
			nonpaging_flush(vcpu);
			FNAME(release_walker)(&walker);
			continue;
		}
		break;
	}

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!shadow_pte) {
		inject_page_fault(vcpu, addr, error_code);
		FNAME(release_walker)(&walker);
		return 0;
	}

	/*
	 * Update the shadow pte.
	 */
	if (write_fault)
		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
					    user_fault);
	else
		fixed = fix_read_pf(shadow_pte);

	FNAME(release_walker)(&walker);

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte)) {
		if (may_access(*shadow_pte, write_fault, user_fault))
			return 1;
		pgprintk("%s: io work, no access\n", __FUNCTION__);
		inject_page_fault(vcpu, addr,
				  error_code | PFERR_PRESENT_MASK);
		return 0;
	}

	/*
	 * pte not present, guest page fault.
	 */
	if (pte_present && !fixed) {
		inject_page_fault(vcpu, addr, error_code);
		return 0;
	}

	++kvm_stat.pf_fixed;

	return 0;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
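	/*
	 * Translate a guest virtual address to a guest physical address by
	 * walking the guest page tables only; no shadow pte is installed.
	 * Handles 4K mappings as well as large pages, including PSE-36 on
	 * 32-bit guests.
	 */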
	struct guest_walker walker;
	pt_element_t guest_pte;
	gpa_t gpa;

	FNAME(walk_addr)(&walker, vcpu, vaddr);
	guest_pte = *walker.ptep;
	FNAME(release_walker)(&walker);

	if (!is_present_pte(guest_pte))
		return UNMAPPED_GVA;

	if (walker.level == PT_DIRECTORY_LEVEL) {
		ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
		ASSERT(PTTYPE == 64 || is_pse(vcpu));

		gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
			(PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));

		if (PTTYPE == 32 && is_cpuid_PSE36())
			gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
					(32 - PT32_DIR_PSE36_SHIFT);
	} else {
		gpa = (guest_pte & PT_BASE_ADDR_MASK);
		gpa |= (vaddr & ~PAGE_MASK);
	}

	return gpa;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK