/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm/page.h>
#include <asm-generic/4level-fixup.h>

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

static inline spinlock_t *pgd_spinlock(pgd_t *);

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* This is for the serialization of PxTLB broadcasts. At least on the N class
 * systems, only one PxTLB inter processor broadcast can be active at any one
 * time on the Merced bus.

 * PTE updates are serialized by the per-pgd spinlock (see pgd_spinlock()
 * below).
 */
extern spinlock_t pa_tlb_flush_lock;
extern spinlock_t pa_swapper_pg_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes        (0)
#endif

#define purge_tlb_start(flags)  do { \
	if (pa_serialize_tlb_flushes)	\
		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_save(flags);	\
	} while (0)
#define purge_tlb_end(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_restore(flags); \
	} while (0)

/* Purge data and instruction TLB entries. The TLB purge instructions
 * are slow on SMP machines since the purge must be broadcast to all CPUs.
 */

static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)                                 \
        do{                                                     \
                *(pteptr) = (pteval);                           \
        } while(0)

#define set_pte_at(mm, addr, ptep, pteval)			\
	do {							\
		pte_t old_pte;					\
		unsigned long flags;				\
		spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
		old_pte = *ptep;				\
		set_pte(ptep, pteval);				\
		purge_tlb_entries(mm, addr);			\
		spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
	} while (0)
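/*
 * Illustrative only (not a definition in this header): generic mm code
 * reaches the hook above through calls such as
 *
 *	set_pte_at(vma->vm_mm, address, ptep, mk_pte(page, vma->vm_page_prot));
 *
 * which takes the per-pgd lock, installs the new value and purges any
 * stale TLB entry for the address before unlocking.
 */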

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PGD_ORDER	1 /* Number of pages per pgd */
#define PMD_ORDER	1 /* Number of pages per pmd */
#define PGD_ALLOC_ORDER	(2 + 1) /* first pgd contains pmd */
#else
#define PGD_ORDER	1 /* Number of pages per pgd */
#define PGD_ALLOC_ORDER	(PGD_ORDER + 1)
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE    (1UL << BITS_PER_PTE)

/* Definitions for 2nd level */
#define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#if CONFIG_PGTABLE_LEVELS == 3
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
#define __PAGETABLE_PMD_FOLDED 1
#define BITS_PER_PMD	0
#endif
#define PTRS_PER_PMD    (1UL << BITS_PER_PMD)

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PMD_SHIFT + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD    (1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD       PTRS_PER_PGD
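/*
 * Worked example (a sketch only, assuming 4 kB pages on a 64-bit kernel
 * with CONFIG_PGTABLE_LEVELS == 3 and the usual BITS_PER_PTE_ENTRY=3,
 * BITS_PER_PMD_ENTRY=2, BITS_PER_PGD_ENTRY=2 from <asm/page.h>):
 *
 *	BITS_PER_PTE  = 12 - 3      = 9   -> PTRS_PER_PTE = 512
 *	PMD_SHIFT     = 12 + 9      = 21  -> one pmd entry maps 2 MB
 *	BITS_PER_PMD  = 12 + 1 - 2  = 11  -> PTRS_PER_PMD = 2048
 *	PGDIR_SHIFT   = 21 + 11     = 32  -> one pgd entry maps 4 GB
 *	BITS_PER_PGD  = 12 + 1 - 2  = 11  -> PTRS_PER_PGD = 2048
 *
 * giving MAX_ADDRBITS = 32 + 11 = 43 bits of virtual address space
 * (see below).
 */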

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
#endif
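/* e.g. a 64-bit kernel with the 4 kB-page figures sketched above
 * (KERNEL_INITIAL_ORDER = 26, PMD_SHIFT = 21) needs
 * 1 << (26 - 21) = 32 initial page-table pages. */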

/*
 * pgd entries used up by user/kernel:
 */

#define FIRST_USER_ADDRESS	0UL

/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (One example, bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT    21   /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - x)

/* this defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT	   	xlate_pabit(_PAGE_USER_BIT)

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT		12

#define _PAGE_READ     (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE    (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW       (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC     (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY  (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB      (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY    (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP  (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT  (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
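/*
 * For illustration, these expand to the hex values noted next to the
 * *_BIT definitions above, e.g.:
 *
 *	_PAGE_READ    = 1 << (31 - 31) = 0x001
 *	_PAGE_PRESENT = 1 << (31 - 22) = 0x200
 *	_PAGE_USER    = 1 << (31 - 20) = 0x800
 *
 * so all flag bits sit below PFN_PTE_SHIFT and never collide with the
 * page frame number stored in the upper bits of a pte.
 */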

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL		(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT   31
#define _PxD_ATTACHED_BIT  30
#define _PxD_VALID_BIT     29

#define PxD_FLAG_PRESENT  (1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT))
#define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK     (0xf)
#define PxD_FLAG_SHIFT    (4)
#define PxD_VALUE_SHIFT   (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
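/*
 * Example of the encoding (a sketch, assuming 4 kB pages so that
 * PxD_VALUE_SHIFT = 12 - 4 = 8): a pmd/pgd entry holds
 *
 *	(physical address of the next-level table >> 8) | PxD_FLAG_*
 *
 * The table is page aligned, so the low 4 bits of the shifted value are
 * zero and can carry the flags; a 32-bit entry can therefore address
 * 32 + 8 = 40 bits of physical memory, matching the 40/42/44-bit figure
 * in the comment above.
 */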

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY       PAGE_EXECREAD
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)


/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

	 /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  __P000 /* copy on write */
#define __P011  __P001 /* copy on write */
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  __P100 /* copy on write */
#define __P111  __P101 /* copy on write */

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX


extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)     (pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#if CONFIG_PGTABLE_LEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
 * the gateway marker */
#define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
#else
#define pmd_none(x)	(!pmd_val(x))
#endif
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
#if CONFIG_PGTABLE_LEVELS == 3
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		/* This is the entry pointing to the permanent pmd
		 * attached to the pgd; cannot clear it */
		__pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
	else
#endif
		__pmd_val_set(*pmd,  0);
}



#if CONFIG_PGTABLE_LEVELS == 3
#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
#define pgd_page(pgd)	virt_to_page((void *)pgd_page_vaddr(pgd))

/* For 64 bit we have three level tables */

#define pgd_none(x)     (!pgd_val(x))
#define pgd_bad(x)      (!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x)  (pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
#if CONFIG_PGTABLE_LEVELS == 3
	if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
		/* This is the permanent pmd attached to the pgd; cannot
		 * free it */
		return;
#endif
	__pgd_val_set(*pgd, 0);
}
#else
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
static inline void pgd_clear(pgd_t * pgdp)	{ }
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Huge pte definitions.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte)         (__pte(pte_val(pte) | \
				 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte)           (0)
#define pte_mkhuge(pte)         (pte)
#endif


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
									\
	__pte;								\
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}
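/* For illustration: pfn_pte(0x1234, PAGE_KERNEL) yields the pte value
 * 0x1234000 | 0x323, i.e. the page frame number in bits 12 and up with
 * _PAGE_PRESENT, _PAGE_READ, _PAGE_WRITE, _PAGE_DIRTY and _PAGE_ACCESSED
 * set in the low bits. */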

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_address(pmd)))

#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \
((mm)->pgd + ((address) >> PGDIR_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */

#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_index(addr)         (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir,address) \
((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
#else
#define pmd_offset(dir,addr) ((pmd_t *) dir)
#endif

/* Find an entry in the third-level page table.. */ 
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
	((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

/* Encode and de-code a swap entry */

#define __swp_type(x)                     ((x).val & 0x1f)
#define __swp_offset(x)                   ( (((x).val >> 6) &  0x7) | \
					  (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)         ((swp_entry_t) { (type) | \
					    ((offset &  0x7) << 6) | \
					    ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
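/*
 * Resulting layout (informational): the swap type lives in pte bits 0-4,
 * the low three offset bits in bits 6-8, and the remaining offset bits
 * from bit 11 upwards.  Bit 9 (_PAGE_PRESENT) is left clear so that
 * pte_present() is never true for a swap entry, and bit 10 (_PAGE_HUGE)
 * stays clear as well.
 */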


static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
{
	if (unlikely(pgd == swapper_pg_dir))
		return &pa_swapper_pg_lock;
	return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
}
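/*
 * Note: PGD_ALLOC_ORDER is one order larger than the page-table pages
 * themselves need, and pgd_spinlock() points at the start of that spare
 * upper half of the allocation, so each process page table carries its
 * own lock.  The statically allocated swapper_pg_dir has no such tail
 * and falls back to the global pa_swapper_pg_lock.
 */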


static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;
	unsigned long flags;

	if (!pte_young(*ptep))
		return 0;

	spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
	pte = *ptep;
	if (!pte_young(pte)) {
		spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
		return 0;
	}
	set_pte(ptep, pte_mkold(pte));
	purge_tlb_entries(vma->vm_mm, addr);
	spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
	return 1;
}

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;
	unsigned long flags;

	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
	old_pte = *ptep;
	set_pte(ptep, __pte(0));
	purge_tlb_entries(mm, addr);
	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
	set_pte(ptep, pte_wrprotect(*ptep));
	purge_tlb_entries(mm, addr);
	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
}

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#endif /* !__ASSEMBLY__ */


/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif


#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _PARISC_PGTABLE_H */