#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
#define _ASM_POWERPC_PGTABLE_PPC64_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-ppc64-64k.h>
#else
#include <asm/pgtable-ppc64-4k.h>
#endif

#define FIRST_USER_ADDRESS	0

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
                	    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)


/*
 * Define the address range of the kernel non-linear virtual area
 */

#ifdef CONFIG_PPC_BOOK3E
#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
#else
#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
#endif
#define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START	KERN_VIRT_START
#ifdef CONFIG_PPC_BOOK3E
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
#else
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#endif
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)

/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to KERN_VIRT_START + KERN_VIRT_SIZE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)


/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
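
/*
 * Illustrative sketch (not definitions made by this file): REGION_ID()
 * is just the top four bits of an effective address, so classifying a
 * pointer is a single shift.  Assuming the hash (non-Book3E) layout
 * above:
 *
 *	REGION_ID(0xc000000000001000UL) == KERNEL_REGION_ID   (linear map)
 *	REGION_ID(0xd000000000001000UL) == VMALLOC_REGION_ID  (vmalloc/IO)
 *	REGION_ID(0xf000000000001000UL) == VMEMMAP_REGION_ID  (vmemmap)
 *	REGION_ID(0x0000000010000000UL) == USER_REGION_ID     (user space)
 */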

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs and after the vmalloc space on Book3E
 */
#ifdef CONFIG_PPC_BOOK3E
#define VMEMMAP_BASE		VMALLOC_END
#define VMEMMAP_END		KERN_IO_START
#else
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
#endif
#define vmemmap			((struct page *)VMEMMAP_BASE)
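
/*
 * With a virtually mapped memmap, pfn <-> page conversions collapse to
 * pointer arithmetic (a sketch of what the generic SPARSEMEM_VMEMMAP
 * helpers do with this definition, not something defined here):
 *
 *	struct page *page = vmemmap + pfn;
 *	unsigned long pfn = page - vmemmap;
 */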


/*
 * Include the PTE bits definitions
 */
#ifdef CONFIG_PPC_BOOK3S
#include <asm/pte-hash64.h>
#else
#include <asm/pte-book3e.h>
#endif
#include <asm/pte-common.h>

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors, it's
 * used in all cases except Book3S with 64K pages where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#ifdef STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
	do {							         \
		index = 0;					         \
		shift = mmu_psize_defs[psize].shift;		         \

#define pte_iterate_hashed_end() } while(0)

#ifdef CONFIG_PPC_HAS_HASH_64K
#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
#else
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
#endif

#endif /* __real_pte */
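
/*
 * Usage sketch (illustrative, modelled on the hash-flush code): callers
 * visit the hashed subpages of a PTE like this.  With the default
 * macros above the body runs exactly once with index == 0; the Book3S
 * 64K sub-page implementation supplies a real loop.
 *
 *	unsigned long index, shift;
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vaddr, index, shift) {
 *		// one hashed (sub)page of size (1UL << shift) per pass
 *	} pte_iterate_hashed_end();
 */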


/* pte_clear moved to later in this file */

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#define pmd_set(pmdp, pmdval) 	(pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define	pmd_present(pmd)	(pmd_val(pmd) != 0)
#define	pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pmd_page(pmd)		virt_to_page(pmd_page_vaddr(pmd))

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
#define	pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)
#define pud_page(pud)		virt_to_page(pud_page_vaddr(pud))

#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
  (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
  (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
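
/*
 * Putting the accessors above together, a kernel-address lookup is a
 * four-level walk (a minimal sketch; find_linux_pte() later in this
 * file is the real, NULL-checked version):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */
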
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       int huge)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

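	/* ldarx/stdcx. retry loop: spin while _PAGE_BUSY is set (the hash
	 * code owns the PTE), then atomically clear the requested bits */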
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
	: "cc" );
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old & ~clr);
#endif
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

#ifdef CONFIG_PPC_STD_MMU_64
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
#endif

	return old;
}

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{

	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 1);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t * ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0);
}


/* Set the dirty and/or accessed bits atomically in a Linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

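	/* Same ldarx/stdcx. pattern as pte_update(): wait for _PAGE_BUSY
	 * to clear, then atomically OR in the new access/dirty bits */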
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old | bits);
#endif
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
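
/*
 * Round-trip sketch (illustrative): the swap type lives in bits 1-6 of
 * the entry and the offset from bit 8 up; in the PTE itself the whole
 * value sits above PTE_RPN_SHIFT, clear of the valid/protection bits.
 *
 *	swp_entry_t e = __swp_entry(5, 0x1234);
 *	// __swp_type(e) == 5, __swp_offset(e) == 0x1234
 *	pte_t pte = __swp_entry_to_pte(e);
 *	// __pte_to_swp_entry(pte) recovers e
 */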

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns NULL.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt = NULL;

	pg = pgdir + pgd_index(ea);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, ea);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, ea);
			if (pmd_present(*pm))
				pt = pte_offset_kernel(pm, ea);
		}
	}
	return pt;
}

#ifdef CONFIG_HUGETLB_PAGE
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);
#else
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       unsigned *shift)
{
	if (shift)
		*shift = 0;
	return find_linux_pte(pgdir, ea);
}
#endif /* !CONFIG_HUGETLB_PAGE */
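
/*
 * Usage sketch (illustrative): callers that may encounter hugepage
 * mappings use the _or_hugepte variant and honour the returned shift:
 *
 *	unsigned shift;
 *	pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &shift);
 *	// if ptep != NULL, it maps 1UL << (shift ? shift : PAGE_SHIFT)
 *	// bytes at ea
 */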

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */