/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
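
/*
 * A sketch of the intended usage (the actual include site lives in mmu.c and
 * may differ in detail): the including file defines PTTYPE before each
 * inclusion, and FNAME() below renames every function accordingly, so that
 * e.g. FNAME(walk_addr) becomes paging64_walk_addr or paging32_walk_addr:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */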

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;				/* level the walk ended at */
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];	/* gfn of the guest table at each level */
	pt_element_t *table;			/* kmapped guest table being examined */
	pt_element_t pte;			/* guest pte that terminated the walk */
	pt_element_t *ptep;			/* direct pointer to that pte, if any */
	struct page *page;			/* page backing the table holding the pte */
	int index;				/* index of the pte within that table */
	pt_element_t inherited_ar;		/* user/write rights ANDed down the levels */
	gfn_t gfn;				/* guest frame the address maps to */
	u32 error_code;				/* fault error code if the walk failed */
};

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;
	pt_element_t *ptep;
	pt_element_t root;
	gfn_t table_gfn;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	walker->table = NULL;
	walker->page = NULL;
	walker->ptep = NULL;
	root = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
		root = *walker->ptep;
		walker->pte = root;
		if (!(root & PT_PRESENT_MASK))
			goto not_present;
		--walker->level;
	}
#endif
	table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	walker->table_gfn[walker->level - 1] = table_gfn;
	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
		 walker->level - 1, table_gfn);
	slot = gfn_to_memslot(vcpu->kvm, table_gfn);
	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
	walker->page = pfn_to_page(hpa >> PAGE_SHIFT);
	walker->table = kmap_atomic(walker->page, KM_USER0);

	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ptep = &walker->table[index];
		walker->index = index;
		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)ptep & PAGE_MASK));

		if (!is_present_pte(*ptep))
			goto not_present;

		if (write_fault && !is_writeble_pte(*ptep))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(*ptep & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(*ptep & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			*ptep |= PT_ACCESSED_MASK;
		}

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (*ptep & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			break;
		}

		walker->inherited_ar &= walker->table[index];
		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
		kunmap_atomic(walker->table, KM_USER0);
		paddr = safe_gpa_to_hpa(vcpu, table_gfn << PAGE_SHIFT);
		walker->page = pfn_to_page(paddr >> PAGE_SHIFT);
		walker->table = kmap_atomic(walker->page, KM_USER0);
		--walker->level;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);
	}
	walker->pte = *ptep;
	if (walker->page)
		walker->ptep = NULL;
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
	return 0;
}

static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
					struct guest_walker *walker)
{
	mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
}

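/*
 * Build and install a shadow pte for the guest pte @gpte that maps @gaddr.
 * On a write fault to a clean guest pte the guest dirty bit is set first;
 * gfns that back shadowed guest page tables stay write-protected, with
 * *ptwrite flagged so the faulting write is emulated instead; mmio frames
 * are marked with PT_SHADOW_IO_MARK; the rmap is updated for new mappings.
 */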
static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  pt_element_t gpte,
				  u64 access_bits,
				  int user_fault,
				  int write_fault,
				  int *ptwrite,
				  struct guest_walker *walker,
				  gfn_t gfn)
{
	hpa_t paddr;
	int dirty = gpte & PT_DIRTY_MASK;
	u64 spte;
	int was_rmapped = is_rmap_pte(*shadow_pte);

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
		 write_fault, user_fault, gfn);

	if (write_fault && !dirty) {
		pt_element_t *guest_ent, *tmp = NULL;

		if (walker->ptep)
			guest_ent = walker->ptep;
		else {
			tmp = kmap_atomic(walker->page, KM_USER0);
			guest_ent = &tmp[walker->index];
		}

		*guest_ent |= PT_DIRTY_MASK;
		if (!walker->ptep)
			kunmap_atomic(tmp, KM_USER0);
		dirty = 1;
		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
	}

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	spte |= gpte & PT64_NX_MASK;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

	spte |= PT_PRESENT_MASK;
	if (access_bits & PT_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_hpa(paddr)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		return;
	}

	spte |= paddr;

	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	if (!was_rmapped)
		rmap_add(vcpu, shadow_pte, (gaddr & PT64_BASE_ADDR_MASK)
			 >> PAGE_SHIFT);
	if (!ptwrite || !*ptwrite)
		vcpu->last_pte_updated = shadow_pte;
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	access_bits &= gpte;
	FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
			      gpte, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

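/*
 * Reflect a guest write of a guest pte into the shadow pte that mirrors it
 * (the emulated-write path in mmu.c is assumed to pass the written bytes).
 * A guest pte without the present or accessed bit never yields a mapping
 * here; at most the shadow pte is reset to the not-present marker.  Writes
 * smaller than a full pte are ignored.
 */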
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL,
		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
}

static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	gpa_t gaddr;

	access_bits &= gpde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (gpde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
			      gpde, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	u64 *prev_shadow_ent = NULL;

	if (!is_present_pte(walker->pte))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_shadow_present_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = walker->pte;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (walker->pte & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			hugepage_access >>= PT_WRITABLE_SHIFT;
			table_gfn = (walker->pte & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}

	if (walker->level == PT_DIRECTORY_LEVEL) {
		FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	} else {
		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
		FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	}
	return shadow_ent;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			       u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

	return write_pt;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

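/*
 * Pre-populate a newly shadowed page table: entries whose guest ptes are
 * present are left to trap into kvm on first access, while entries whose
 * guest ptes are not present are marked not-trapping so the resulting fault
 * can be reflected to the guest without walking the shadow page tables.
 */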
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i;
	pt_element_t *gpt;

	if (sp->role.metaphysical || PTTYPE == 32) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	gpt = kmap_atomic(gfn_to_page(vcpu->kvm, sp->gfn), KM_USER0);
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		if (is_present_pte(gpt[i]))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	kunmap_atomic(gpt, KM_USER0);
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS