/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <asm-generic/5level-fixup.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/processor.h>

/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
 * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x200000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define	TLBTEMP_BASE		_AC(0x0000000006000000,UL)
#define	TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
#define	TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
#define MODULES_VADDR		_AC(0x0000000010000000,UL)
#define MODULES_LEN		_AC(0x00000000e0000000,UL)
#define MODULES_END		_AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
#define VMALLOC_START		_AC(0x0000000100000000,UL)
#define VMEMMAP_BASE		VMALLOC_END

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 3)

/* PUD_SHIFT determines the size of the area a third-level page
 * table can map
 */
#define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PUD_BITS	(PAGE_SHIFT - 3)

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 3)

#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
#endif

#if (PGDIR_SHIFT + PGDIR_BITS) != 53
#error Page table parameters do not cover virtual address space properly.
#endif
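
/* Worked example, assuming the default 8K base page size (PAGE_SHIFT == 13):
 * each level contributes PAGE_SHIFT - 3 == 10 bits, so PMD_SHIFT == 23
 * (one PMD entry covers 8MB), PUD_SHIFT == 33 (8GB) and
 * PGDIR_SHIFT == 43 (8TB).  The four levels together resolve
 * 43 + 10 == 53 bits of virtual address, which is exactly what the
 * check above enforces.
 */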

#if (PMD_SHIFT != HPAGE_SHIFT)
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif

#ifndef __ASSEMBLY__

extern unsigned long VMALLOC_END;

#define vmemmap			((struct page *)VMEMMAP_BASE)

#include <linux/sched.h>

bool kern_addr_valid(unsigned long addr);

/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PUD	(1UL << PUD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)
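
/* With the default 8K pages each of these is 1 << 10 == 1024, so every
 * table level is exactly one page of 1024 eight-byte entries.
 */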

/* Kernel has a separate 44bit address space. */
#define FIRST_USER_ADDRESS	0UL

#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
#define pud_ERROR(e)							\
	pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))

#endif /* !(__ASSEMBLY__) */

/* PTE bits which are the same in SUN4U and SUN4V format.  */
#define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE            */
#define _PAGE_R	  	  _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
#define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_PUD_HUGE    _PAGE_PMD_HUGE

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page            */
#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page             */
#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only        */
#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits:       */
#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit    */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit      */
#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
#define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE           */
#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
#define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect          */
#define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page      */
#define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable             */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only        */
#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
#define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
/* On M7, bit 9 is instead used to enable MCD corruption detection */
#define _PAGE_MCD_4V      _AC(0x0000000000000200,UL) /* Memory Corruption    */
#define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page      */
#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page      */
#define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable             */
#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits        */
#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved             */
#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page            */
#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page             */
#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page            */
#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */

#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V

#if REAL_HPAGE_SHIFT != 22
#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
#endif

#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
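
/* Both select the 4MB TTE size encoding, matching the
 * REAL_HPAGE_SHIFT == 22 check above (4MB == 1 << 22).
 */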

/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
#define __P000	__pgprot(0)
#define __P001	__pgprot(0)
#define __P010	__pgprot(0)
#define __P011	__pgprot(0)
#define __P100	__pgprot(0)
#define __P101	__pgprot(0)
#define __P110	__pgprot(0)
#define __P111	__pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)

#ifndef __ASSEMBLY__

pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;

/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* This macro must be updated when the size of struct page grows above 80
 * or shrinks below 64.
 * The idea is that the compiler optimizes the switch() statement away
 * and leaves only clrx instructions.
 */
#define	mm_zero_struct_page(pp) do {					\
	unsigned long *_pp = (void *)(pp);				\
									\
	 /* Check that struct page is either 64, 72, or 80 bytes */	\
	BUILD_BUG_ON(sizeof(struct page) & 7);				\
	BUILD_BUG_ON(sizeof(struct page) < 64);				\
	BUILD_BUG_ON(sizeof(struct page) > 80);				\
									\
	switch (sizeof(struct page)) {					\
	case 80:							\
		_pp[9] = 0;	/* fallthrough */			\
	case 72:							\
		_pp[8] = 0;	/* fallthrough */			\
	default:							\
		_pp[7] = 0;						\
		_pp[6] = 0;						\
		_pp[5] = 0;						\
		_pp[4] = 0;						\
		_pp[3] = 0;						\
		_pp[2] = 0;						\
		_pp[1] = 0;						\
		_pp[0] = 0;						\
	}								\
} while (0)
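
/* For example, with a 72-byte struct page only the "case 72" and
 * default arms survive constant folding, so the expected expansion is
 * nine straight-line 8-byte clears and no branch.
 */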

/* PFNs are real physical page numbers.  However, mem_map only begins to record
 * per-page information starting at pfn_base.  This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB.   This is common on a partitioned E10000, for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	return __pte(paddr | pgprot_val(prot));
}
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte = pfn_pte(page_nr, pgprot);

	return __pmd(pte_val(pte));
}
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
#endif

/* This one can be done with two shifts.  */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long ret;

	__asm__ __volatile__(
	"\n661:	sllx		%1, %2, %0\n"
	"	srlx		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sllx		%1, %4, %0\n"
	"	srlx		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (ret)
	: "r" (pte_val(pte)),
	  "i" (21), "i" (21 + PAGE_SHIFT),
	  "i" (8), "i" (8 + PAGE_SHIFT));

	return ret;
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
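
/* The shift pairs work because the physical address field occupies
 * bits 42:13 on sun4u and bits 55:13 on sun4v: shifting left by
 * 64 - 43 == 21 (resp. 64 - 56 == 8) drops everything above the
 * physical address, and shifting right by the same amount plus
 * PAGE_SHIFT leaves just the pfn.
 */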

static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	unsigned long mask, tmp;

	/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
	 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
	 *
	 * Even if we use negation tricks the result is still a 6
	 * instruction sequence, so don't try to play fancy and just
	 * do the most straightforward implementation.
	 *
	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
	 */

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%2), %1\n"
	"	sethi		%%hi(%2), %0\n"
	"\n662:	or		%1, %%ulo(%2), %1\n"
	"	or		%0, %%lo(%2), %0\n"
	"\n663:	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%3), %1\n"
	"	sethi		%%hi(%3), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%3), %1\n"
	"	or		%0, %%lo(%3), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	"	.section	.sun_m7_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sethi		%%hi(%4), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%4), %1\n"
	"	or		%0, %%lo(%4), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (mask), "=r" (tmp)
	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));

	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}
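
/* Of the three mask constants above, the first is the sun4u preserve
 * mask and the second the sun4v one; the third is an M7 variant of the
 * sun4v mask that omits _PAGE_CV_4V, since that bit is repurposed for
 * MCD there (see _PAGE_MCD_4V).
 */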

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_modify(pte, newprot);

	return __pmd(pte_val(pte));
}
#endif

static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	__asm__ __volatile__(
	"\n661:	andn		%0, %2, %0\n"
	"	or		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %4, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	"	.section	.sun_m7_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %6, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
	             "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
	             "i" (_PAGE_CP_4V));

	return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached".  That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				struct page *page, int writable);
#define arch_make_huge_pte arch_make_huge_pte
static inline unsigned long __pte_default_huge_mask(void)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%1), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	nop\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

	return mask;
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | __pte_default_huge_mask());
}

static inline bool is_default_hugetlb_pte(pte_t pte)
{
	unsigned long mask = __pte_default_huge_mask();

	return (pte_val(pte) & mask) == mask;
}

static inline bool is_hugetlb_pmd(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
}

static inline bool is_hugetlb_pud(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PUD_HUGE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkhuge(pte);
	pte_val(pte) |= _PAGE_PMD_HUGE;

	return __pmd(pte_val(pte));
}
#endif
#else
static inline bool is_hugetlb_pte(pte_t pte)
{
	return false;
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	or		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return __pte(val | mask);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkold(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) & ~mask);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) | mask);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

static inline unsigned long pte_young(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_dirty(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_write(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_exec(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%hi(%1), %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_present(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	and		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

	return val;
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	return pte_val(a) & _PAGE_VALID;
}

static inline unsigned long pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pmd_large(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_pfn(pte);
}

#define pmd_write pmd_write
static inline unsigned long pmd_write(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_write(pte);
}

#define pud_write(pud)	pte_write(__pte(pud_val(pud)))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_dirty(pte);
}

static inline unsigned long pmd_young(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_young(pte);
}

static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkold(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_wrprotect(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkdirty(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkclean(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkyoung(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkwrite(pte);

	return __pmd(pte_val(pte));
}

static inline pgprot_t pmd_pgprot(pmd_t entry)
{
	unsigned long val = pmd_val(entry);

	return __pgprot(val);
}
#endif

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != 0UL;
}

#define pmd_none(pmd)			(!pmd_val(pmd))

/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
 * very simple, it's just the physical address.  PTE tables are of
 * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
 * the top bits outside of the range of any physical address size we
 * support are clear as well.  We also validate the physical address itself.
 */
#define pmd_bad(pmd)			(pmd_val(pmd) & ~PAGE_MASK)

#define pud_none(pud)			(!pud_val(pud))

#define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)

#define pgd_none(pgd)			(!pgd_val(pgd))

#define pgd_bad(pgd)			(pgd_val(pgd) & ~PAGE_MASK)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
#endif

static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long val = __pa((unsigned long) (ptep));

	pmd_val(*pmdp) = val;
}

#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
static inline unsigned long __pmd_page(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((unsigned long) __va(pfn << PAGE_SHIFT));
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((unsigned long) __va(pfn << PAGE_SHIFT));
}

#define pmd_page(pmd) 			virt_to_page((void *)__pmd_page(pmd))
#define pud_page(pud) 			virt_to_page((void *)pud_page_vaddr(pud))
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define pgd_page_vaddr(pgd)		\
	((unsigned long) __va(pgd_val(pgd)))
#define pgd_present(pgd)		(pgd_val(pgd) != 0U)
#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)

static inline unsigned long pud_large(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_pfn(pte);
}

/* Same in both SUN4V and SUN4U.  */
#define pte_none(pte) 			(!pte_val(pte))

#define pgd_set(pgdp, pudp)	\
	(pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the third-level page table.. */
#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset(pgdp, address)	\
	((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))

/* Find an entry in the second-level page table.. */
#define pmd_offset(pudp, address)	\
	((pmd_t *) pud_page_vaddr(*(pudp)) + \
	 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))

/* Find an entry in the lowest-level (pte) page table.. */
#define pte_index(dir, address)	\
	((pte_t *) __pmd_page(*(dir)) + \
	 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_kernel		pte_index
#define pte_offset_map			pte_index
#define pte_unmap(pte)			do { } while (0)
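
/* A full lookup therefore chains pgd_offset() -> pud_offset() ->
 * pmd_offset() -> pte_offset_kernel(), each step consuming
 * PAGE_SHIFT - 3 bits of the virtual address (10 bits for 8K pages).
 */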

/* We cannot include <linux/mm_types.h> at this point yet: */
extern struct mm_struct init_mm;

/* Actual page table PTE updates.  */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift);

static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
				pte_t *ptep, pte_t orig, int fullmm,
				unsigned int hugepage_shift)
{
	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 *             and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
		tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
	return pmd;
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;
	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
}

#define set_pte_at(mm,addr,ptep,pte)	\
	__set_pte_at((mm), (addr), (ptep), (pte), 0)

#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_page_all(current->mm,		\
					      pfn_to_page(this_pfn));	\
	}								\
	newpte;								\
})
#endif
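
/* The (1 << 13) test above picks out the D-cache color bit: with 8K
 * pages a virtually-indexed D-cache can alias two mappings of the same
 * page unless they agree in address bit 13, so the page is flushed
 * when a move changes its color (sun4v/hypervisor chips are exempt).
 */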

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t) \
	  { \
		(((long)(type) << PAGE_SHIFT) | \
                 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
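
/* With the default 8K pages a swp_entry_t is laid out as:
 *
 *	bits 12:0	zero (the low PAGE_SHIFT bits of a real PTE)
 *	bits 20:13	swap type (8 bits)
 *	bits 63:21	swap offset
 */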

int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
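
/* BITS_PER_LONG is 64 here, so the iospace sits in pfn bits 63:60.
 * For example MK_IOSPACE_PFN(2, 0x10) yields 0x2000000000000010, from
 * which GET_IOSPACE() and GET_PFN() recover 2 and 0x10 again.
 */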

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	int space = GET_IOSPACE(pfn);
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range 
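
/* A caller would typically build the pfn argument with
 * MK_IOSPACE_PFN() first, e.g.
 *
 *	io_remap_pfn_range(vma, vaddr, MK_IOSPACE_PFN(space, pfn),
 *			   size, prot);
 *
 * the 4-bit iospace then reappears as physical address bits 35:32.
 */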

#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
				   unsigned long, unsigned long,
				   unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

void pgtable_cache_init(void);
void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */