/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
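
/*
 * Editorial note (not from the original header): zero_page_mask
 * selects among several cache-colour copies of the zero page. As an
 * illustration, a hypothetical zero_page_mask of 0x3000UL would imply
 * four zero pages, with ZERO_PAGE(vaddr) picking the copy whose
 * position matches bits 12-13 of vaddr.
 */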

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:  
 * P Private-Space Bit:    
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.10.xx0010.1
 * prot-none, dirty, young	.10.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
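
/*
 * Editorial sanity sketch (not in the original header): the software
 * pte bits must stay disjoint from the hardware bits in the low 12
 * bits of a pte, which a compile-time check inside any function could
 * assert:
 *
 *	BUILD_BUG_ON((_PAGE_PRESENT | _PAGE_YOUNG | _PAGE_DIRTY |
 *		      _PAGE_READ | _PAGE_WRITE | _PAGE_SPECIAL |
 *		      _PAGE_UNUSED | _PAGE_SOFT_DIRTY) &
 *		     (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_LARGE));
 */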

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys, faults should no
 * longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
551 552 553 554
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}
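
/*
 * Typical usage pattern (editorial sketch, grounded in set_pte_at()
 * below): the PGSTE lock serializes updates of the pte/pgste pair
 * between host and guest:
 *
 *	pgste = pgste_get_lock(ptep);
 *	... modify *ptep and/or pgste ...
 *	pgste_set_unlock(ptep, pgste);
 */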

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
	*ptep = entry;
	return pgste;
}

/**
 * struct gmap - guest address space
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};

struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
807
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
808 809

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
810
					unsigned long addr,
811 812 813
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
814 815
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
816
		gmap_do_ipte_notify(mm, addr, ptep);
817 818 819 820 821
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry, mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + local TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + global TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%2,%0,%1,0"
			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
	} while (nr != 255);
}

static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);
	else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}
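
/*
 * Editorial note on the attach_count protocol above (not from the
 * original header): the add of 0x10000 marks an in-flight flush in
 * the upper 16 bits of mm->context.attach_count, while the lower 16
 * bits count the CPUs the mm is attached to. If (count & 0xffff) is
 * not above "active", no other CPU uses the mm, so the direct flush
 * may issue a CPU-local IPTE and the lazy flush may merely mark the
 * pte invalid and defer the TLB flush via mm->context.flush_mm.
 */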

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int dirty;

	if (!mm_has_pgste(mm))
		return 0;
	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte, oldpte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
	}

	oldpte = pte = *ptep;
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
		    _PGSTE_GPS_USAGE_UNUSED)
			pte_val(pte) |= _PAGE_UNUSED;
		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste = pgste_set_pte(ptep, pgste, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;
	pte_t oldpte;

	oldpte = *ptep;
	if (pte_same(oldpte, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	ptep_flush_direct(vma->vm_mm, address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		if (pte_val(oldpte) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, entry, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
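
/*
 * Minimal sketch of a full software page table walk using the
 * helpers above (editorial example, not from the original header);
 * mm and addr are assumed to be a valid mm_struct and a mapped
 * address:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */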

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT |
			_SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void pmdp_flush_direct(struct mm_struct *mm,
				     unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return;
	}
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(address, pmdp);
	else
		__pmdp_idte(address, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}
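
/*
 * Design note (editorial): IDTE invalidates the DAT table entry and
 * purges the TLB in a single instruction; machines without the IDTE
 * facility fall back to CSP (compare and swap and purge) above, which
 * yields the same invalidate-plus-purge semantics.
 */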

static inline void pmdp_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(address, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
		(pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd;

	pmd = *pmdp;
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
	*pmdp = pmd_mkold(pmd);
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmdp_flush_direct(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long address,
						 pmd_t *pmdp, int full)
{
	pmd_t pmd = *pmdp;

	if (!full)
		pmdp_flush_lazy(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long address, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		pmdp_flush_direct(mm, address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */
1657 1658 1659 1660
#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2
H
1662
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
L
	pte_t pte;
1665 1666 1667 1668

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
L
}

1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}
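
/*
 * Round-trip sketch (editorial example, not from the original): for
 * any type t within __SWP_TYPE_MASK and offset o within
 * __SWP_OFFSET_MASK:
 *
 *	swp_entry_t e = __swp_entry(t, o);
 *	BUG_ON(__swp_type(e) != t || __swp_offset(e) != o);
 */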

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

1694 1695
extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
1696
extern int s390_enable_sie(void);
1697
extern int s390_enable_skey(void);
1698
extern void s390_reset_cmma(struct mm_struct *mm);
H
1700 1701 1702 1703
/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

L
 * No page table caches to initialise
 */
1707 1708
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
L
#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */