/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63

/* The Power architecture requires the HPT to be at least 256kB */
#define PPC_MIN_HPT_ORDER	18

long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	unsigned long hpt;
	struct revmap_entry *rev;
	struct kvmppc_linear_info *li;
	long order = kvm_hpt_order;

	if (htab_orderp) {
		order = *htab_orderp;
		if (order < PPC_MIN_HPT_ORDER)
			order = PPC_MIN_HPT_ORDER;
	}

	/*
	 * If the user wants a different size from default,
	 * try first to allocate it from the kernel page allocator.
	 */
	hpt = 0;
	if (order != kvm_hpt_order) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	/* Next try to allocate from the preallocated pool */
	if (!hpt) {
		li = kvm_alloc_hpt();
		if (li) {
			hpt = (ulong)li->base_virt;
			kvm->arch.hpt_li = li;
			order = kvm_hpt_order;
		}
	}

	/* Lastly try successively smaller sizes from the page allocator */
	while (!hpt && order > PPC_MIN_HPT_ORDER) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	if (!hpt)
		return -ENOMEM;

	kvm->arch.hpt_virt = hpt;
	kvm->arch.hpt_order = order;
	/* HPTEs are 2**4 bytes long */
	kvm->arch.hpt_npte = 1ul << (order - 4);
	/* 128 (2**7) bytes in each HPTEG */
	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
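	/*
	 * Worked example: order = 20 gives a 1MB HPT with 2^16 HPTEs
	 * grouped into 2^13 HPTEGs, so hpt_npte = 65536 and
	 * hpt_mask = 0x1fff.
	 */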

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;
	kvm->arch.sdr1 = __pa(hpt) | (order - 18);

	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
		hpt, order, kvm->arch.lpid);

	if (htab_orderp)
		*htab_orderp = order;
	return 0;

 out_freehpt:
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(hpt, order - PAGE_SHIFT);
	return -ENOMEM;
}

long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	long err = -EBUSY;
	long order;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done) {
		kvm->arch.rma_setup_done = 0;
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
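		/*
		 * The barrier pairs with the one taken after the
		 * vcpus_running increment in the vcpu run path: either
		 * that path sees rma_setup_done cleared and serializes on
		 * kvm->lock, or we see vcpus_running non-zero below and
		 * back out.
		 */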
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt_virt) {
		order = kvm->arch.hpt_order;
		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
		/*
		 * Set the whole last_vcpu array to an invalid vcpu number.
		 * This ensures that each vcpu will flush its TLB on next entry.
		 */
		memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
		*htab_orderp = order;
		err = 0;
	} else {
		err = kvmppc_alloc_hpt(kvm, htab_orderp);
		order = *htab_orderp;
	}
 out:
	mutex_unlock(&kvm->lock);
	return err;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(kvm->arch.hpt_virt,
			   kvm->arch.hpt_order - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}
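/*
 * These match the encodings expected by hpte_page_size(): 4k pages have
 * HPTE_V_LARGE clear, 64k pages have HPTE_V_LARGE set plus 0x1000 in the
 * low bits of the second doubleword, and 16M pages have HPTE_V_LARGE set
 * with no extra bits in the second doubleword.
 */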

void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvm->arch.hpt_mask + 1)
		npages = kvm->arch.hpt_mask + 1;

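	/*
	 * Template values for the two HPTE doublewords: a bolted,
	 * 1TB-segment entry using the fixed VRMA VSID, and R/C/M plus
	 * read-write-execute permissions (PP_RWXX) in the second
	 * doubleword.  The per-page address bits are ORed in below.
	 */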
	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
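		/*
		 * Each HPTEG holds 8 HPTEs, so the global HPTE index is
		 * (group << 3) + slot; we always use slot 7.
		 */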
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the kvm->arch.slot_phys[][] arrays.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				get_page(hpage);
				put_page(page);
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
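	/*
	 * physp now points at the slot_phys[] entry for the first 4k page
	 * of this (possibly huge) page.  The loop below fills every
	 * covered entry; KVMPPC_GOT_PAGE is recorded only in the first
	 * entry we write, so the page reference is dropped exactly once.
	 */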
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got)
		put_page(page);
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

/*
 * We come here on a H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	vcpu->arch.pgdir = current->mm->pgd;
	ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;

}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0)
		return -ENOENT;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
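	/*
	 * The lwsync orders the reads of the HPTE and guest_rpte above
	 * before the store below that drops HPTE_V_HVLOCK.
	 */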
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
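 * For example, lwz (major opcode 32) and stw (36) differ only in the
 * 0x10000000 bit, and the X-form lwzx and stwx (major opcode 31,
 * extended opcodes 23 and 151) differ only in the 0x100 bit, which is
 * what the masks below test.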
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/* We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later. The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible. It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gfn = hpte_rpn(r, psize);
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);
	}

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
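	/*
	 * mmu_seq is checked again with mmu_notifier_retry() under the
	 * rmap lock below; if an invalidation has run in the meantime we
	 * throw the new mapping away and let the guest retry the access.
	 */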

	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, NULL);
			if (ptep && pte_present(*ptep)) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
		pfn = page_to_pfn(page);
	}

	ret = -EFAULT;
	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_io)) {
		if (is_io)
			return -EFAULT;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/* Set the HPTE to point to pfn */
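	/*
	 * ~(HPTE_R_PP0 - pte_size) keeps the PP0 bit and the offset bits
	 * below pte_size while clearing the old real page number, which
	 * is then replaced with the new pfn.
	 */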
	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	rmap = &memslot->rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (hptep[0] & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	hptep[1] = r;
	eieio();
	hptep[0] = hpte[0];
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	if (page) {
		/*
		 * We drop pages[0] here, not page because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup()
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	hptep[0] &= ~HPTE_V_HVLOCK;
	preempt_enable();
	goto out_put;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;

			ret = handler(kvm, &memslot->rmap[gfn_offset],
				      memslot->base_gfn + gfn_offset);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}
		j = rev[i].forw;
		if (j == i) {
			/* chain is now empty */
			*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		} else {
			/* remove i from chain */
			h = rev[i].back;
			rev[h].forw = j;
			rev[j].back = h;
			rev[i].forw = rev[i].back = i;
			*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
		}

		/* Now check and modify the HPTE */
		ptel = rev[i].guest_rpte;
		psize = hpte_page_size(hptep[0], ptel);
		if ((hptep[0] & HPTE_V_VALID) &&
		    hpte_rpn(ptel, psize) == gfn) {
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			/* Harvest R and C */
			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
			rev[i].guest_rpte = ptel | rcbits;
		}
		unlock_rmap(rmapp);
		hptep[0] &= ~HPTE_V_HVLOCK;
	}
	return 0;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(hptep[1] & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			rev[i].guest_rpte |= HPTE_R_R;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		if (!(hptep[1] & HPTE_R_C))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
			/* need to make it temporarily absent to clear C */
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			hptep[1] &= ~HPTE_R_C;
			eieio();
			hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
			rev[i].guest_rpte |= HPTE_R_C;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	unsigned long i;
	unsigned long *rmapp, *map;

	preempt_disable();
	rmapp = memslot->rmap;
	map = memslot->dirty_bitmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp))
			__set_bit_le(i, map);
		++rmapp;
	}
	preempt_enable();
	return 0;
}

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, psize, offset;
	unsigned long pa;
	unsigned long *physp;

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return NULL;
	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
		if (!physp)
			return NULL;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				return NULL;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
		get_page(page);
	} else {
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			return NULL;
		page = pages[0];
	}
	psize = PAGE_SIZE;
	if (PageHuge(page)) {
		page = compound_head(page);
		psize <<= compound_order(page);
	}
	offset = gpa & (psize - 1);
	if (nb_ret)
		*nb_ret = psize - offset;
	return page_address(page) + offset;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
	struct page *page = virt_to_page(va);

	put_page(page);
}

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}