/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *     They are semantically the same although in different contexts
 *     VALID marks a TLB entry exists and it will only happen if PRESENT
 *  - Utilise some unused free bits to confine PTE flags to 12 bits
 *     This is a must for 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *     to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
 *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself as we now have to memset 1K
 *    instead of 8k per page table.
 * -TODO: Right now page table alloc is 8K and rest 7K is unused
 *    need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <linux/const.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/mmu.h>	/* to propagate CONFIG_ARC_MMU_VER <n> */

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
 *      separate PD0 and PD1, which combined form a translation entry)
 *      while for PTE perspective, they are 8 and 9 respectively
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss hdlrs)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)
#endif

/* vmalloc permissions */
#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			_PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE 0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ	0
#endif

/* Defaults for every user page */
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)

/* More Abbreviated helpers */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
						       _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* While the kernel runs out of untranslated space, vmalloc/modules use a chunk of
 * user vaddr space - visible in all addr spaces, but kernel mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
#else
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
#endif
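
/*
 * Sketch (illustrative only, not the literal refill code) of how a TLB
 * miss handler is expected to use these masks when splitting a Linux PTE
 * across the two hardware descriptors:
 *
 *	pd0 = (vaddr & PAGE_MASK) | asid | (pte & PTE_BITS_IN_PD0);
 *	pd1 = pte & (PTE_BITS_NON_RWX_IN_PD1 | PTE_BITS_RWX);
 */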

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *       which directly corresponds to  PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independent of X/W unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable COW mechanism
 */
	/* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R	/* Pvt-W => !W */
#define __P011  PAGE_U_R	/* Pvt-W => !W */
#define __P100  PAGE_U_X_R	/* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R	/* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R	/* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R	/* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R	/* X => R */
#define __S111  PAGE_U_X_W_R
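
/*
 * Worked example: a MAP_PRIVATE, PROT_READ|PROT_WRITE mapping arrives as
 * __P011 and thus PAGE_U_R, i.e. the PTE starts out !W.  The first write
 * faults, the COW machinery copies the page, and only the new private copy
 * is made writable via pte_mkwrite().
 */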

/****************************************************************
 * 2 tier (PGD:PTE) software page walker
 *
 * [31]		    32 bit virtual address              [0]
 * -------------------------------------------------------
 * |               | <------------ PGDIR_SHIFT ----------> |
 * |		   |					 |
 * | BITS_FOR_PGD  |  BITS_FOR_PTE  | <-- PAGE_SHIFT --> |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 *
 * In a single page size configuration, only PAGE_SHIFT is fixed
 * So both PGD and PTE sizing can be tweaked
 *  e.g. 8K page (PAGE_SHIFT 13) can have
 *  - PGDIR_SHIFT 21  -> 11:8:13 address split
 *  - PGDIR_SHIFT 24  -> 8:11:13 address split
 *
 * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
 * so the sizing flexibility is gone.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT	24
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT	21
#else
/*
 * Only Normal page support so "hackable" (see comment above)
 * Default value provides 11:8:13 (8K), 11:9:12 (4K)
 */
#define PGDIR_SHIFT	21
#endif

#define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD	(32 - PGDIR_SHIFT)

#define PGDIR_SIZE	_BITUL(PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define	PTRS_PER_PTE	_BITUL(BITS_FOR_PTE)
#define	PTRS_PER_PGD	_BITUL(BITS_FOR_PGD)
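
/*
 * Worked example, default "normal page only" 8K config (PAGE_SHIFT 13,
 * PGDIR_SHIFT 21, i.e. the 11:8:13 split):
 *
 *	BITS_FOR_PTE = 21 - 13 = 8   => PTRS_PER_PTE = 256
 *	BITS_FOR_PGD = 32 - 21 = 11  => PTRS_PER_PGD = 2048
 *	PGDIR_SIZE   = 1 << 21       => each PGD entry spans 2M of vaddr
 *
 * A page table is thus 256 entries * 4 bytes = 1K: the "memset 1K instead
 * of 8k" win noted in the changelog at the top of this file.
 */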

/*
 * Number of entries a userland program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirements for the lowest virtual address we permit any
 * user space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS      0UL


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)			(!pte_val(x))
#define pte_present(x)			(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)			(!pmd_val(x))
#define	pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)			(pmd_val(x))
#define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40 */
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)		pte_offset(dir, addr)
#define pte_offset_map(dir, addr)		pte_offset(dir, addr)
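
/*
 * Minimal sketch of a full software walk (assumes @mm is valid and @addr is
 * mapped; with pgtable-nopmd the PGD entry doubles as the PMD entry):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pte_t *pte = pte_offset((pmd_t *)pgd, addr);
 *	unsigned long paddr = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
 */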

/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
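
/*
 * e.g. PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY)) below expands to:
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{
 *		pte_val(pte) |= (_PAGE_DIRTY);
 *		return pte;
 *	}
 */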

PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,	|= (_PAGE_HW_SZ));

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
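
/*
 * e.g. mprotect() dropping write permission would, in effect, do:
 *
 *	pte = pte_modify(pte, PAGE_U_R);
 *
 * _PAGE_CHG_MASK retains the pfn plus ACCESSED/DIRTY/SPECIAL, so a dirty
 * page stays dirty across the protection change.
 */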

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd)+pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to Page Directory of "current" task
 * in a MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of NON "current"
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
#ifndef CONFIG_SMP
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif
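
/*
 * Usage sketch (only where "current" is guaranteed current, per the
 * caution above, e.g. signal frame setup):
 *
 *	pgd_t *pgd = pgd_offset_fast(current->mm, addr);
 *	pte_t *pte = pte_offset((pmd_t *)pgd, addr);
 */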

extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
 * PAGE_PRESENT is zero in a PTE holding swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
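
/*
 * Worked example: __swp_entry(3, 0x100) yields
 *
 *	val = (3 & 0x1f) | (0x100 << 13) = 0x200003
 *
 * __swp_type() recovers 3, __swp_offset() recovers 0x100, and bits 12-5
 * stay zero so _PAGE_PRESENT reads false for this PTE.
 */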

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()   do { } while (0)

#endif /* __ASSEMBLY__ */

#endif