#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef CONFIG_PPC64
#include <asm-ppc/pgtable.h>
#else

/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
struct mm_struct;
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-64k.h>
#else
#include <asm/pgtable-4k.h>
#endif

#define FIRST_USER_ADDRESS	0

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
                	    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)

#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
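
/*
 * PGTABLE_EADDR_SIZE is simply the sum of the index widths of the four
 * page table levels plus the in-page offset, so PGTABLE_RANGE is the
 * total effective address span the tree can map.  As a worked example,
 * if the four index sizes summed to 32 and PAGE_SHIFT were 12, then
 * PGTABLE_RANGE would be 1UL << 44, i.e. 16TB.  The actual index sizes
 * come from pgtable-64k.h or pgtable-4k.h, included above.
 */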

/*
 * Define the address range of the vmalloc VM area.
 */
#define VMALLOC_START (0xD000000000000000ul)
#define VMALLOC_SIZE  (0x80000000000UL)
#define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)

/*
 * Define the address range of the imalloc VM area.
 */
#define PHBS_IO_BASE	VMALLOC_END
#define IMALLOC_BASE	(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
#define IMALLOC_END	(VMALLOC_START + PGTABLE_RANGE)

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define USER_REGION_ID		(0UL)
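
/*
 * The region ID is just the top nibble of an effective address, e.g.
 * REGION_ID(VMALLOC_START) extracts 0xd from 0xD000000000000000, so a
 * vmalloc-space pointer can be recognised with something like:
 *
 *	if (REGION_ID(ea) == VMALLOC_REGION_ID)
 *		... ea lies in the vmalloc/imalloc region ...
 */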

/*
 * Common bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible. Additional
 * bits may be defined in pgtable-*.h
 */
#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
#define _PAGE_USER	0x0002 /* matches one of the PP bits */
#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED	0x0008
#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
#define _PAGE_DIRTY	0x0080 /* C: page changed */
#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
#define _PAGE_RW	0x0200 /* software: user write access allowed */
#define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */ 

#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)

/* __pgprot defined in asm-powerpc/page.h */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
			       _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)

#define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
#define HAVE_PAGE_AGP

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7


/*
 * POWER4 and newer have per-page execute protection; older chips can only
 * do this on a segment (256MB) basis.
 *
 * Also, write permission implies read permission.
 * This is the closest we can get.
 *
 * Note due to the way vm flags are laid out, the bits are XWR
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_HUGETLB_PAGE

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif

#ifndef __ASSEMBLY__

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * mk_pte takes a (struct page *) as input
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
	return pte;
}
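
/*
 * A PTE is normally built from a pfn plus one of the protection values
 * defined above; for instance, a cacheable kernel mapping would use
 * something like:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 * pfn_pte() shifts the pfn into the RPN field and ORs in the protection
 * bits; mk_pte() is the same operation starting from a struct page.
 */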

#define pte_modify(_pte, newprot) \
  (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/* pte_clear moved to later in this file */

#define pte_pfn(x)		((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#define pmd_set(pmdp, pmdval) 	(pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define	pmd_present(pmd)	(pmd_val(pmd) != 0)
#define	pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_kernel(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
#define	pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page(pud)		(pud_val(pud) & ~PUD_MASKED_BITS)

#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})

/* 
 * Find an entry in a page-table-directory.  We combine the address region 
 * (the high order N bits) and the pgd portion of the address.
 */
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
  (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
  (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)
#define pte_unmap_nested(pte)		do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
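
/*
 * pgd_offset()/pud_offset()/pmd_offset()/pte_offset_kernel() together
 * implement a software walk of the four-level tree; find_linux_pte()
 * near the bottom of this file shows a complete walk that bails out on
 * empty upper-level entries.
 */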

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER;}
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC;}
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }

/* Atomic PTE updates */
static inline unsigned long pte_update(pte_t *p, unsigned long clr)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
	: "cc" );
	return old;
}
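
/*
 * pte_update() atomically clears the bits in 'clr' and returns the old
 * PTE value, retrying via the ldarx/stdcx. loop while another CPU holds
 * _PAGE_BUSY.  A typical use is clearing one status bit and looking at
 * what was set before, e.g.:
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED);
 *	if (old & _PAGE_HASHPTE)
 *		... the hash table may still hold a stale entry ...
 */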

/* PTE updating functions.  This function puts the PTE in the batch but
 * doesn't actually trigger the hash flush immediately; you need to call
 * flush_tlb_pending() to do that.
 * Pass -1 for "normal" size (4K or 64K)
 */
extern void hpte_update(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, unsigned long pte, int huge);

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

       	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_ACCESSED);
	if (old & _PAGE_HASHPTE) {
		hpte_update(mm, addr, ptep, old, 0);
		flush_tlb_pending();
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

/*
 * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
 * moment we always flush but we need to fix hpte_update and test if the
 * optimisation is worth it.
 */
static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

       	if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_DIRTY);
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, ptep, old, 0);
	return (old & _PAGE_DIRTY) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long old;

       	if ((pte_val(*ptep) & _PAGE_RW) == 0)
       		return;
	old = pte_update(ptep, _PAGE_RW);
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, ptep, old, 0);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
						  __ptep); 		\
	flush_tlb_page(__vma, __address);				\
	__dirty;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, ptep, old, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t * ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, ptep, old, 0);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		pte_clear(mm, addr, ptep);
		flush_tlb_pending();
	}
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);

#ifdef CONFIG_PPC_64K_PAGES
	if (mmu_virtual_psize != MMU_PAGE_64K)
		pte = __pte(pte_val(pte) | _PAGE_COMBO);
#endif /* CONFIG_PPC_64K_PAGES */

	*ptep = pte;
}
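
/*
 * Generic mm code ends up here when installing a new translation,
 * typically with a value built by mk_pte(), roughly:
 *
 *	set_pte_at(mm, addr, ptep, mk_pte(page, vma->vm_page_prot));
 *
 * The _PAGE_HPTEFLAGS filtering above ensures a stale _PAGE_HASHPTE
 * from a previous mapping is never carried into the new PTE.
 */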

/* Set the dirty and/or accessed bits atomically in a linux PTE, this
 * function doesn't need to flush the hash entry
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
}
#define  ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								   \
		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
		flush_tlb_page_nohash(__vma, __address);	       	   \
	} while(0)

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
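
/*
 * pgprot_noncached() is typically applied to a vma's protection before
 * mapping MMIO or other side-effect-sensitive physical ranges, e.g. in
 * a driver's mmap() method (sketch only):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			   vma->vm_page_prot);
 *
 * io_remap_pfn_range() is defined further down in this file.
 */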

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
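
/*
 * A software swap entry keeps the type in its low bits and the offset
 * from bit 8 up; __swp_entry_to_pte() then shifts the whole value above
 * PTE_RPN_SHIFT, so the status bits (including _PAGE_PRESENT) stay
 * clear and a round trip preserves both fields:
 *
 *	swp_entry_t ent = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(ent);
 *	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != type);
 */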

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 * The only use is in fs/ncpfs/dir.c
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

void pgtable_cache_init(void);

/*
 * find_linux_pte returns the address of a linux pte for a given 
 * effective address and directory.  If not found, it returns zero.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt = NULL;

	pg = pgdir + pgd_index(ea);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, ea);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, ea);
			if (pmd_present(*pm))
				pt = pte_offset_kernel(pm, ea);
		}
	}
	return pt;
}
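
/*
 * Typical use (sketch): given an mm and an effective address, locate
 * the Linux PTE without faulting:
 *
 *	pte_t *ptep = find_linux_pte(mm->pgd, ea);
 *	if (ptep && pte_present(*ptep))
 *		... operate on *ptep ...
 */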

#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */

#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */