/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm-generic/5level-fixup.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern void vmem_map_init(void);
pmd_t *vmem_pmd_alloc(void);
pte_t *vmem_pte_alloc(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
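
/*
 * Illustrative use (hypothetical call site, not part of this header): a
 * mapping path that just created 16 new 1 MB segments in the identity
 * mapping would account them with
 *
 *	update_page_count(PG_DIRECT_MAP_1M, 16);
 *
 * and the corresponding teardown path would pass a negative count.
 */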

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

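/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * empty_zero_page is the start of a block of zero pages and zero_page_mask
 * picks the page whose cache color matches the user address, so read-only
 * zero mappings of different colors hit different physical pages. The
 * helper below only restates the arithmetic of ZERO_PAGE(vaddr).
 */
static inline struct page *zero_page_for_demo(unsigned long vaddr)
{
	/* identical to ZERO_PAGE(vaddr) */
	return virt_to_page((void *)(empty_zero_page +
				     (vaddr & zero_page_mask)));
}
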
/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:  
 * P Private-Space Bit:    
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

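/*
 * Worked example (illustrative only, not a kernel interface): an empty pte
 * has the value _PAGE_INVALID == 0x400, so pte_none() is true; a swap pte
 * keeps _PAGE_PROTECT set with _PAGE_PRESENT clear, matching
 * (pte & 0x201) == 0x200. The hypothetical helper below restates the
 * table above in code.
 */
static inline const char *pte_kind_for_demo(pte_t pte)
{
	if (pte_val(pte) == _PAGE_INVALID)
		return "empty";
	if ((pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT)
		return "swap";
	if (pte_val(pte) & _PAGE_PRESENT)
		return "present";
	return "unknown";
}
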
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	     */
#define _REGION3_ENTRY_ORIGIN  ~0x7ffUL/* region third table origin	     */

#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff227UL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe27UL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG |	_PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}

/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bit that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION3_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define	IPTE_LOCAL	1

static inline void __ptep_ipte(unsigned long address, pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + TLB flush for the pte */
	asm volatile(
		"       .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
		: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
		  [m4] "i" (local));
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"       .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
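
/*
 * Illustrative sequence (hypothetical caller, mirroring what common code
 * such as change_pte_range does; not an additional API):
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);	// flushes the TLB
 *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 *	flush_tlb_range(vma, start, end);		// nop on s390
 *
 * Because ptep_get_and_clear already flushed, there is no window in which
 * a stale translation for the old pte can remain in the TLB.
 */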
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep , int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

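/*
 * Illustrative sketch (hypothetical helper, not a kernel interface): a
 * complete software walk with the accessors above. Real callers must hold
 * the appropriate page table locks; huge pmds are skipped here because a
 * large segment entry has no pte level below it.
 */
static inline pte_t *walk_to_pte_for_demo(struct mm_struct *mm,
					  unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd) || pmd_large(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
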
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp, int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pmdp)
		: [r1] "a" (sto), [r2] "a" ((address & HPAGE_MASK)),
		  [m4] "i" (local)
		: "cc" );
}

static inline void __pudp_idte(unsigned long address, pud_t *pudp, int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pudp)
		: [r1] "a" (r3o), [r2] "a" ((address & PUD_MASK)),
		  [m4] "i" (local)
		: "cc");
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}
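
/*
 * Illustrative check (hypothetical helper, not a kernel interface; assumes
 * swp_entry_t from linux/mm_types.h, included above): type and offset
 * survive a round trip through the encoding above as long as they fit
 * into their 5 and 52 bit fields.
 */
static inline int swp_roundtrip_demo(unsigned long type, unsigned long offset)
{
	swp_entry_t e = __swp_entry(type, offset);

	return __swp_type(e) == (type & __SWP_TYPE_MASK) &&
	       __swp_offset(e) == (offset & __SWP_OFFSET_MASK);
}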

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */