/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
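/*
 * Illustrative sketch (not part of the original header): the direct
 * mapping code bumps these counters when it maps or splits large pages,
 * e.g. after mapping one 1 MB segment, and with a negative count on unmap:
 *
 *	update_page_count(PG_DIRECT_MAP_1M, 1);
 *	...
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 */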

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
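/*
 * Illustrative sketch (not part of the original header): the zero page
 * is replicated and ZERO_PAGE(vaddr) uses zero_page_mask to pick the
 * replica whose cache colour matches vaddr:
 *
 *	struct page *zp = ZERO_PAGE(uaddr);	// colour-matched zero page
 */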

/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES
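
/*
 * Illustrative sketch (not part of the original header): with the shifts
 * above, one pmd entry maps a 1 MB segment (1UL << 20), one pud entry a
 * 2 GB region (1UL << 31), one p4d entry a 4 TB region (1UL << 42) and
 * one pgd entry an 8 PB region (1UL << 53).
 */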

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG |	_PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
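/*
 * Illustrative sketch (not part of the original header): CRDTE compares
 * and replaces a table entry and purges the matching TLB entries; e.g.
 * exchanging a kernel pte could look like
 *
 *	crdte(pte_val(old), pte_val(new), (unsigned long)ptep,
 *	      CRDTE_DTT_PAGE, addr, init_mm.context.asce);
 */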

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return pud_bad(__pud(p4d_val(p4d)));
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;

	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;

	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
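/*
 * Illustrative sketch (not part of the original header): changing a pte
 * from read-write to read-only keeps the SW dirty/young state but
 * re-enables hardware protection:
 *
 *	pte = pte_modify(pte, PAGE_RO);
 */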

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define	IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"       .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
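/*
 * Illustrative sketch (not part of the original header) of the common
 * code sequence described above; on s390 step 1 already flushed the TLB,
 * so step 3 has nothing left to do:
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);	// clears + flushes
 *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 *	flush_tlb_range(vma, start, end);		// nop on s390
 */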

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
				unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
			unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(p4d) (p4d_val(p4d) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	p4d_t *p4d = (p4d_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4d = (p4d_t *) pgd_deref(*pgd);
	return p4d + p4d_index(address);
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	pud_t *pud = (pud_t *) p4d;

	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) p4d_deref(*p4d);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
			       unsigned long opt, unsigned long asce,
			       int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bit 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
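/*
 * Illustrative sketch (not part of the original header): type and offset
 * survive a round trip through the swap pte encoding, e.g.
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	// __swp_type(entry) == type, __swp_offset(entry) == offset
 *	// and pte_swap(__swp_entry_to_pte(entry)) is true
 */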

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */