/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))

#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
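
/*
 * Worked example (illustrative values only): with 4K pages and, say,
 * 64 colored zero pages, zero_page_mask would be 0x3f000 and
 *
 *	ZERO_PAGE(0x12345000)
 *		== virt_to_page((void *)(empty_zero_page + 0x5000))
 *
 * picks the zero page whose cache color matches the user address;
 * is_zero_pfn() then accepts any pfn in zero_pfn .. zero_pfn + 63.
 */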

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048
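
/*
 * Size check for the 64 bit layout: 256 ptes x 4K = 1M per page table
 * (PMD_SHIFT == 20), 2048 pmds x 1M = 2G (PUD_SHIFT == 31),
 * 2048 puds x 2G = 4T (PGDIR_SHIFT == 42), and 2048 pgds x 4T = 8P
 * of addressable virtual memory.
 */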

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as a system call address.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:  
 * P Private-Space Bit:    
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit (for KVM) */
#define _PAGE_SWR	0x008		/* SW pte referenced bit (for KVM) */
#define _PAGE_SPECIAL	0x010		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction to
 * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bit combinations 1101, 1111
 * the swap pte is 1011; 0001, 0011, 0101 and 0111 are invalid.
 */
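
/*
 * Worked example: a read-only pte has the value _PAGE_TYPE_RO == 0x200
 * (irxt == 0100).  When ipte invalidates it, the hw invalid bit is ored
 * in and the value becomes 0x600 (irxt == 1100), which is still
 * distinguishable from _PAGE_TYPE_EMPTY == 0x400 (irxt == 1000).
 */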

#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

L
341 342 343
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
L
345 346 347
#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

348 349
#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override   */
350 351
#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)
352

353 354 355 356 357 358 359 360 361 362 363 364 365
/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* CONFIG_64BIT */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW
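
/*
 * Example: a private PROT_READ|PROT_WRITE mapping uses __P011 == PAGE_RO,
 * so the first store faults and the generic fault handler breaks COW;
 * only the shared combinations with the write bit set (__S010, __S011,
 * __S110, __S111) get PAGE_RW directly.
 */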

static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}
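
/*
 * Sketch of the locking pattern built on these helpers (compare
 * set_pte_at() below); the pgste lives PTRS_PER_PTE entries behind
 * the pte and pgste_get_lock() spins on its PCL bit:
 *
 *	pgste_t pgste;
 *
 *	if (mm_has_pgste(mm)) {
 *		pgste = pgste_get_lock(ptep);
 *		... read or modify *ptep and the pgste ...
 *		pgste_set_unlock(ptep, pgste);
 *	}
 */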

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (!pte_present(*ptep))
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 1);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Clear host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer changed and referenced to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Transfer changed & referenced to pte software bits */
	pte_val(*ptep) |= bits << 1;		/* _PAGE_SWR & _PAGE_SWC */
#endif
	return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (!pte_present(*ptep))
		return pgste;
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	/* Transfer page referenced bit to pte software bit (host view) */
	if (young || (pgste_val(pgste) & RCP_HR_BIT))
		pte_val(*ptep) |= _PAGE_SWR;
	/* Clear host referenced bit in pgste. */
	pgste_val(pgste) &= ~RCP_HR_BIT;
	/* Transfer page referenced bit to guest bit in pgste */
	pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
#endif
	return pgste;
}

static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	if (!pte_present(entry))
		return;
	address = pte_val(entry) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 1);
#endif
}

/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @next: pointer to the next gmap_rmap structure in the list
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
	struct list_head list;
	unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
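
/*
 * Hedged usage sketch for the gmap interface (the way a hypervisor
 * such as KVM might drive it; error handling omitted, and the meaning
 * of the addresses taken from gmap_map_segment's parameter names,
 * i.e. "from" is the parent process address, "to" the guest address):
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *
 *	gmap_map_segment(gmap, from, to, length);
 *	gmap_enable(gmap);
 *	...
 *	gmap_disable(gmap);
 *	gmap_free(gmap);
 */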

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_pte(ptep, pgste, entry);
		*ptep = entry;
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWC)
		return 1;
#endif
	return 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWC;
#endif
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
		return dirty;
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB:
	 * on s390 reference bits are in the storage key and never in
	 * the TLB. With virtualization we handle the reference bit,
	 * without it we can simply return. */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	*ptep = pte;
	if (mm_has_pgste(mm))
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
}
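
/*
 * The modify_prot pair is meant to be used by generic code roughly as
 * follows (sketch, page table lock held):
 *
 *	pte = ptep_modify_prot_start(mm, addr, ptep);
 *	pte = pte_modify(pte, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, pte);
 */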

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			pgste = pgste_get_lock(ptep);

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		*ptep = pte_wrprotect(pte);

		if (mm_has_pgste(mm))
			pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	__ptep_ipte(address, ptep);
	*ptep = entry;

	if (mm_has_pgste(vma->vm_mm))
		pgste_set_unlock(ptep, pgste);
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
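
/*
 * Putting the walk together (sketch; locking and the pgd/pud/pmd
 * presence checks that real callers do are omitted):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...
 *	pte_unmap(pte);
 */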

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bit 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
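
/*
 * Round-trip check for the file pte encoding: pgoff 0x1234 gives
 *	((0x34 << 1) + (0x24 << 12)) | _PAGE_TYPE_FILE == 0x24669
 * and pte_to_pgoff() recovers (0x24 << 7) + 0x34 == 0x1234.
 */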

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
L
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _S390_PAGE_H */