#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present() gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE		_PAGE_BIT_DIRTY

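/*
 * Each _PAGE_* flag below is its bit number turned into a mask.
 * _AT(pteval_t, 1) expands to ((pteval_t)1) from C (and to a plain 1
 * from assembly), so the shifts are safe even when pteval_t is 64-bit
 * on a 32-bit PAE build.
 */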
#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif

#define _PAGE_FILE	(_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
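
/*
 * Note that _PAGE_CACHE_WC (PAT index 1) only means write-combining
 * because the kernel reprograms the PAT MSR at boot; the power-on
 * default for that entry is write-through.
 */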

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)

/*
 * Protection maps for mmap()/mprotect(), indexed by the xwr bits of
 * the requested protection: __P* are the private (copy-on-write)
 * variants, __S* the shared ones.
 */
/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

/*
 * early identity-mapping pte attribute macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
/*
 * PDE_IDENT_ATTR includes the USER bit.  Since the PDE and PTE
 * protection bits are combined, this allows userspace to access the
 * high-address-mapped VDSO when CONFIG_COMPAT_VDSO is enabled.
 */
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif
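
/*
 * The 32-bit identity-mapping attributes above are just the flag bits
 * spelled out in hex so they can also be used from assembly, e.g.
 * 0x067 == _PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED |
 * _PAGE_DIRTY (0x1 | 0x2 | 0x4 | 0x20 | 0x40).
 */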

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))
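
/*
 * The boot_cpu_data.x86 > 3 test skips i386-class CPUs: the PCD/PWT
 * bits only exist from the i486 on, so on a 386 the protection value
 * is returned unchanged.
 */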

#ifndef __ASSEMBLY__

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

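/*
 * A pmd maps a large page only if both _PAGE_PSE and _PAGE_PRESENT are
 * set; a not-present entry (e.g. a swap entry) can carry arbitrary
 * values in the PSE bit position, so testing PSE alone would misfire.
 */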
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
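
/*
 * Both helpers mask the result with __supported_pte_mask, so a bit
 * such as _PAGE_NX is silently dropped when the CPU does not support
 * it.  Round trip: pte_pfn(pfn_pte(pfn, PAGE_KERNEL)) == pfn.
 */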

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}
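
/*
 * Example: pte_modify(pte, PAGE_READONLY) keeps the frame number and
 * the bits in _PAGE_CHG_MASK (cache attributes, accessed/dirty state,
 * special) but replaces the protection bits, which is how mprotect()
 * can downgrade a mapping without losing its dirty state.
 */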

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

static inline int is_new_memtype_allowed(unsigned long flags,
						unsigned long new_flags)
{
	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}
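
/*
 * Typical use (a sketch): after the PAT code picks an effective memory
 * type for a request, the caller checks
 * is_new_memtype_allowed(req_flags, new_flags) and backs out of the
 * mapping if it returns 0.
 */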

#ifndef __ASSEMBLY__
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
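
/*
 * Example: with 4-level paging (PGDIR_SHIFT == 39, PTRS_PER_PGD == 512)
 * pgd_index() extracts virtual address bits 39..47.
 */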

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
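
/*
 * Typical use:
 *	unsigned int level;
 *	pte_t *pte = lookup_address(address, &level);
 *	if (pte && level == PG_LEVEL_4K)
 *		...the address is mapped by a normal 4K pte...
 */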

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
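
/*
 * Example: a new pgd inherits the kernel mappings with
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */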


#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */