/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>

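/*
 * Invalidate a pte and flush the matching TLB entries. The _local
 * variant only flushes the TLB of the issuing CPU, the _global
 * variant flushes all CPUs. With the TLB-guest facility the NODAT
 * hint may be set as long as no gmap address space is attached to
 * the mm.
 */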
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL)
			opt |= IPTE_NODAT;
		__ptep_ipte(addr, ptep, opt, IPTE_LOCAL);
	} else {
		__ptep_ipte(addr, ptep, 0, IPTE_LOCAL);
	}
}

static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	unsigned long opt, asce;

	if (MACHINE_HAS_TLB_GUEST) {
		opt = 0;
		asce = READ_ONCE(mm->context.gmap_asce);
		if (asce == 0UL)
			opt |= IPTE_NODAT;
		__ptep_ipte(addr, ptep, opt, IPTE_GLOBAL);
	} else {
		__ptep_ipte(addr, ptep, 0, IPTE_GLOBAL);
	}
}

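/*
 * Invalidate the pte and flush the TLB immediately. A local IPTE is
 * enough while the mm is attached to no CPU but the issuing one;
 * flush_count is raised around the operation so that a concurrent
 * attach of the mm waits for the invalidation to complete.
 */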
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		ptep_ipte_local(mm, addr, ptep);
	else
		ptep_ipte_global(mm, addr, ptep);
	atomic_dec(&mm->context.flush_count);
	return old;
}

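/*
 * Lazy variant: if the mm is attached to the issuing CPU only, just
 * mark the pte invalid and record the pending TLB flush in flush_mm;
 * the flush happens later. Otherwise do a global IPTE right away.
 */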
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else {
		ptep_ipte_global(mm, addr, ptep);
	}
	atomic_dec(&mm->context.flush_count);
	return old;
}

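/*
 * The PGSTE (page status table entry) shadowing a pte is located
 * PTRS_PER_PTE entries behind it in the same page. pgste_get_lock()
 * acquires its PCL lock bit with a compare-and-swap loop,
 * pgste_set_unlock() stores the new value with the lock bit cleared.
 */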
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

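/*
 * Fold the storage key state of the page (access key, fetch
 * protection, referenced and changed bits) into the software PGSTE,
 * so that the guest view survives the invalidation of the pte.
 */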
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}

static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep,
				    pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}

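/*
 * Exchange a pte for a new value. The _direct variant flushes the
 * TLB immediately, the _lazy variant may defer the flush; PGSTE
 * locking and guest notification are handled in start/commit.
 */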
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_direct(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

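/*
 * Start a protection change: the old pte is invalidated, but the
 * PGSTE lock is held and preemption stays disabled until the
 * matching ptep_modify_prot_commit() stores the new pte.
 */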
pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (!MACHINE_HAS_NX)
		pte_val(pte) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);

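/*
 * Segment-table (pmd) counterparts of the pte invalidation helpers.
 * Machines without the IDTE facility fall back to CSP, which always
 * operates on all CPUs.
 */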
static inline void pmdp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, 0, IDTE_LOCAL);
}

static inline void pmdp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(addr, pmdp, IDTE_NODAT, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp, 0, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
}

static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pmdp_idte_local(mm, addr, pmdp);
	else
		pmdp_idte_global(mm, addr, pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else {
		pmdp_idte_global(mm, addr, pmdp);
	}
	atomic_dec(&mm->context.flush_count);
	return old;
}

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

static inline void pudp_idte_local(struct mm_struct *mm,
				   unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, 0, IDTE_LOCAL);
}

static inline void pudp_idte_global(struct mm_struct *mm,
				    unsigned long addr, pud_t *pudp)
{
	if (MACHINE_HAS_TLB_GUEST)
		__pudp_idte(addr, pudp, IDTE_NODAT, IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pudp_idte(addr, pudp, 0, IDTE_GLOBAL);
	else
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use __pmdp_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
}

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		pudp_idte_local(mm, addr, pudp);
	else
		pudp_idte_global(mm, addr, pudp);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
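/*
 * Page tables deposited for a huge pmd are kept on a list threaded
 * through the pgtable pages themselves, so a later split of the huge
 * page can withdraw one without allocating.
 */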
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh)) {
		pmd_huge_pte(mm, pmdp) = NULL;
	} else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}

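/*
 * Create a shadow pte for VSIE: the target pte inherits the page
 * frame of the parent pte at sptep and the protection bit of the
 * guest-provided pte. The parent PGSTE is tagged with PGSTE_VSIE_BIT
 * so that invalidations are propagated to the shadow. Returns 1 on
 * success, 0 if already shadowed, -EAGAIN if the parent pte is not
 * usable.
 */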
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	ptep_flush_direct(mm, saddr, ptep);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}

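/*
 * Adjust the mm counters for a swap or migration entry that is being
 * zapped and drop its reference on the swap cache.
 */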
static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
			      PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return false;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return false;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return false;
	/* We can't run guests backed by huge pages, but userspace can
	 * still set them up and then try to migrate them without any
	 * migration support.
	 */
	if (pmd_large(*pmd))
		return true;

	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		ptep_ipte_global(mm, addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);

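/*
 * Set the storage key for a guest page: the key bits are recorded in
 * the PGSTE and, if the page is mapped, also written into the real
 * storage key. "nq" selects the non-quiescing form of SSKE.
 */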
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste  */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

/**
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);

/**
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;
	int cc = 0;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);

int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);

/**
 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 *
 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 *	   of orc, -EFAULT for invalid addresses.
 */
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
			unsigned long *oldpte, unsigned long *oldpgste)
{
	unsigned long pgstev;
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	int res = 0;

	WARN_ON_ONCE(orc > ESSA_MAX);
	if (unlikely(orc > ESSA_MAX))
		return -EINVAL;
	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	if (oldpte)
		*oldpte = pte_val(*ptep);
	if (oldpgste)
		*oldpgste = pgstev;

	switch (orc) {
	case ESSA_GET_STATE:
		break;
	case ESSA_SET_STABLE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		break;
	case ESSA_SET_UNUSED:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
		if (pte_val(*ptep) & _PAGE_INVALID)
			res = 1;
		break;
	case ESSA_SET_POT_VOLATILE:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
			break;
		}
		if (pgstev & _PGSTE_GPS_ZERO) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			break;
		}
		if (!(pgstev & PGSTE_GC_BIT)) {
			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
			res = 1;
			break;
		}
		break;
	case ESSA_SET_STABLE_RESIDENT:
		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
		pgstev |= _PGSTE_GPS_USAGE_STABLE;
		/*
		 * Since the resident state can go away any time after this
		 * call, we will not make this page resident. We can revisit
		 * this decision if a guest will ever start using this.
		 */
		break;
	case ESSA_SET_STABLE_IF_RESIDENT:
		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
			pgstev |= _PGSTE_GPS_USAGE_STABLE;
		}
		break;
	default:
		/* we should never get here! */
		break;
	}
	/* If we are discarding a page, set it to logical zero */
	if (res)
		pgstev |= _PGSTE_GPS_ZERO;

	pgste_val(pgste) = pgstev;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return res;
}
EXPORT_SYMBOL(pgste_perform_essa);

/**
 * set_pgste_bits - set specific PGSTE bits.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @bits: a bitmask representing the bits that will be touched
 * @value: the values of the bits to be written. Only the bits in the mask
 *	   will be written.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
			unsigned long bits, unsigned long value)
{
	spinlock_t *ptl;
	pgste_t new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	new = pgste_get_lock(ptep);

	pgste_val(new) &= ~bits;
	pgste_val(new) |= value & bits;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_pgste_bits);

/**
 * get_pgste - get the current PGSTE for the given address.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @pgstep: will be written with the current PGSTE for the given address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;
	*pgstep = pgste_val(pgste_get(ptep));
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_pgste);
#endif