/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, pte)     do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */
#ifndef __s390x__
#define VMALLOC_START	0x78000000UL
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_START	0x3e000000000UL
#define VMALLOC_END	0x3e040000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define VMEM_MAP	((struct page *) VMALLOC_END)

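/*
 * Worked example (illustrative only, assuming a 31 bit kernel and a
 * sizeof(struct page) of 32 bytes): the 32MB between VMALLOC_END and
 * VMEM_MAP_END hold 0x100000 struct pages, but VMALLOC_START >> PAGE_SHIFT
 * is only 0x78000 pages, so VMEM_MAX_PFN = 0x78000 and
 * VMEM_MAX_PHYS = (0x78000 << 12) & ~(16MB - 1) = 0x78000000.
 */
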
/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |                     PFRA                         |0IP0|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:  
 * P Private-Space Bit:    
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
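
/*
 * For example, an active read-only pte (irxt 0100) that is being
 * invalidated is observed as 1100 for a moment; pte_present() and
 * pte_file() both reject that value because the swt bit is clear, so a
 * racing handle_pte_fault cannot mistake the intermediate state for a
 * swap or file pte.
 */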

/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED    0x02          /* HW changed bit                   */
#define _PAGE_REFERENCED 0x04          /* HW referenced bit                */

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that execute permission currently
 * implies read permission as well.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW
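
/*
 * Example of the xwr indexing above: a private PROT_READ|PROT_EXEC
 * mapping uses __P101, i.e. PAGE_EX_RO, while a shared
 * PROT_READ|PROT_WRITE mapping uses __S011, i.e. PAGE_RW.
 */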

#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */
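
/*
 * A page/segment/region table that has a shadow copy (see the
 * mm->context.noexec handling in set_pte_at and pte_clear) records the
 * shadow table's address in the index field of its first struct page.
 * get_shadow_table() returns the location of the entry in the shadow
 * table that corresponds to the entry at 'table', or NULL if no shadow
 * table exists.
 */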

static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))

static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}

/* forward declaration for SetPageUptodate in page-flags.h*/
static inline void page_clear_dirty(struct page *page);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED)
		set_bit_simple(RCP_GC_BIT, pgste);
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
		SetPageDirty(page);
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_and_clear_dirty for
	 * details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	if (mm->context.pgstes)
		ptep_rcp_copy(ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= PAGE_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.pgstes)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB
	 * With virtualization we handle the reference bit, without it
	 * we can simply return */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.pgstes) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (atomic_read(&(__mm)->mm_users) > 1 ||			\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})
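
/*
 * Sketch of the sequence described above, following the common-code flow
 * around change_pte_range (illustration only):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);  - does the tlb flush here
 *	pte = pte_modify(pte, newprot);
 *	set_pte_at(mm, addr, ptep, pte);
 *	flush_tlb_range(vma, start, end);          - a nop on s390
 */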

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		if (atomic_read(&(__mm)->mm_users) > 1 ||		\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
	}								\
})

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if the page is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}

/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

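	/*
	 * rrbe (reset reference bit extended) clears the reference bit in
	 * the storage key of the frame and indicates the old reference and
	 * change bits in the condition code; cc bit 0x2 is set when the
	 * reference bit was one, which is the value returned below (see the
	 * z/Architecture Principles of Operation).
	 */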
	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud  + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
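
/*
 * Typical walk through all four levels, as done by generic mm code
 * (illustrative sketch only; mm and addr are assumed to exist):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */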

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise an specification
 * exception will occur instead of a page translation exception. The
 * specifiation exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
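
/*
 * Round trip of the encoding above (illustrative values only):
 *
 *	swp_entry_t ent = __swp_entry(3, 0x1234);
 *	pte_t pte = __swp_entry_to_pte(ent);
 *	__swp_type(__pte_to_swp_entry(pte))   == 3
 *	__swp_offset(__pte_to_swp_entry(pte)) == 0x1234
 */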

#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int add_shared_memory(unsigned long start, unsigned long size);
extern int remove_shared_memory(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#define __HAVE_ARCH_MEMMAP_INIT
extern void memmap_init(unsigned long, int, unsigned long, unsigned long);

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */