/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63
#define NR_LPIDS	(LPID_RSVD + 1)
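/*
 * Bitmap of LPIDs that have been handed out.  kvmppc_mmu_hv_init() marks
 * the host's own LPID and the reserved LPID as busy so that guests can
 * never be given them.
 */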
unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];

long kvmppc_alloc_hpt(struct kvm *kvm)
{
	unsigned long hpt;
	unsigned long lpid;
	struct revmap_entry *rev;

	/* Allocate guest's hashed page table */
	hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
			       HPT_ORDER - PAGE_SHIFT);
	if (!hpt) {
		pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
		return -ENOMEM;
	}
	kvm->arch.hpt_virt = hpt;

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;

	/* Allocate the guest's logical partition ID */
	do {
		lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
		if (lpid >= NR_LPIDS) {
			pr_err("kvm_alloc_hpt: No LPIDs free\n");
			goto out_freeboth;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
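	/* i.e. the HPT real address plus HTABSIZE (= HPT_ORDER - 18) */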
	kvm->arch.lpid = lpid;

	pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
	return 0;

 out_freeboth:
	vfree(rev);
 out_freehpt:
	free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
	return -ENOMEM;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	clear_bit(kvm->arch.lpid, lpid_inuse);
	vfree(kvm->arch.revmap);
	free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}
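/*
 * Taken together, the two helpers give:
 *	 4kB pages: dword 0 | 0,            dword 1 | 0
 *	64kB pages: dword 0 | HPTE_V_LARGE, dword 1 | 0x1000
 *	16MB pages: dword 0 | HPTE_V_LARGE, dword 1 | 0
 */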

void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	long ret;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > HPT_NPTEG)
		npages = HPT_NPTEG;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;
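	/*
	 * hp0 holds the bits common to dword 0 of every entry: a bolted,
	 * 1TB-segment HPTE with the VRMA VSID shifted into the AVPN bits.
	 * hp1 holds the common dword 1 bits: R, C, M and the PP_RWXX
	 * permission encoding.  The address-dependent bits are ORed in
	 * per page below.
	 */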

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
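		/*
		 * Each HPTE group holds 8 entries, so the group number
		 * times 8 plus 7 indexes the last slot of the group.
		 */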
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	memset(lpid_inuse, 0, sizeof(lpid_inuse));

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	set_bit(host_lpid, lpid_inuse);
	/* rsvd_lpid is reserved for use in partition switching */
	set_bit(rsvd_lpid, lpid_inuse);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
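	/*
	 * Set the MSR the guest runs with when an interrupt is delivered
	 * to it: 64-bit mode with machine checks enabled.
	 */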
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the kvm->arch.slot_phys[][] arrays.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
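	/*
	 * Each slot_phys[] word packs the page's physical address together
	 * with the pin flag (KVMPPC_GOT_PAGE), the cache attribute bits and
	 * the page order in its low-order bits.
	 */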
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got) {
		if (PageHuge(page))
			page = compound_head(page);
		put_page(page);
	}
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

/*
 * We come here on an H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	vcpu->arch.pgdir = current->mm->pgd;
	ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;

}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
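	/* keep the in-page offset from ea, take the page frame from r */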
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0)
		return -ENOENT;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
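/*
 * For example, lwz (primary opcode 32) and stw (36) differ in the
 * 0x10000000 bit, while the indexed forms under major opcode 31,
 * lwzx (extended opcode 23) and stwx (151), differ in the 0x100 bit
 * of the instruction word.
 */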
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/* We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * MMIO accesses are emulated by looking at the hash for
	 * translation once, then performing the access later. The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible. It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	return kvmppc_emulate_mmio(run, vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gfn = hpte_rpn(r, psize);
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
					      dsisr & DSISR_ISSTORE);
	}

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
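	/*
	 * If an invalidation runs after this point, mmu_notifier_retry()
	 * below (checked under the rmap lock) will notice and we drop the
	 * fault so that the guest retries the access.
	 */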

	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, NULL);
			if (ptep && pte_present(*ptep)) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
		pfn = page_to_pfn(page);
	}

	ret = -EFAULT;
	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_io)) {
		if (is_io)
			return -EFAULT;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/* Set the HPTE to point to pfn */
	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	rmap = &memslot->rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (hptep[0] & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	hptep[1] = r;
	eieio();
	hptep[0] = hpte[0];
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	if (page)
		put_page(page);
	return ret;

 out_unlock:
	hptep[0] &= ~HPTE_V_HVLOCK;
	preempt_enable();
	goto out_put;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;

			ret = handler(kvm, &memslot->rmap[gfn_offset],
				      memslot->base_gfn + gfn_offset);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

	for (;;) {
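		/*
		 * The rmap word for this gfn holds flag bits plus, when
		 * KVMPPC_RMAP_PRESENT is set, the index of one HPTE in a
		 * circular chain linked through rev[].forw and rev[].back.
		 */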
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}
		j = rev[i].forw;
		if (j == i) {
			/* chain is now empty */
			*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		} else {
			/* remove i from chain */
			h = rev[i].back;
			rev[h].forw = j;
			rev[j].back = h;
			rev[i].forw = rev[i].back = i;
			*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
		}

		/* Now check and modify the HPTE */
		ptel = rev[i].guest_rpte;
		psize = hpte_page_size(hptep[0], ptel);
		if ((hptep[0] & HPTE_V_VALID) &&
		    hpte_rpn(ptel, psize) == gfn) {
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			/* Harvest R and C */
			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
			rev[i].guest_rpte = ptel | rcbits;
		}
		unlock_rmap(rmapp);
		hptep[0] &= ~HPTE_V_HVLOCK;
	}
	return 0;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(hptep[1] & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			rev[i].guest_rpte |= HPTE_R_R;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		if (!(hptep[1] & HPTE_R_C))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
			/* need to make it temporarily absent to clear C */
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			hptep[1] &= ~HPTE_R_C;
			eieio();
			hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
			rev[i].guest_rpte |= HPTE_R_C;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	unsigned long i;
	unsigned long *rmapp, *map;

	preempt_disable();
	rmapp = memslot->rmap;
	map = memslot->dirty_bitmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp))
			__set_bit_le(i, map);
		++rmapp;
	}
	preempt_enable();
	return 0;
}

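/*
 * Pin the guest page containing gpa and return a kernel-virtual pointer
 * to it.  *nb_ret is set to the number of bytes from gpa to the end of
 * the (possibly huge) page; the caller releases the page again with
 * kvmppc_unpin_guest_page().
 */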
void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, psize, offset;
	unsigned long pa;
	unsigned long *physp;

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return NULL;
	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
		if (!physp)
			return NULL;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				return NULL;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
	} else {
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			return NULL;
		page = pages[0];
	}
	psize = PAGE_SIZE;
	if (PageHuge(page)) {
		page = compound_head(page);
		psize <<= compound_order(page);
	}
	if (!kvm->arch.using_mmu_notifiers)
		get_page(page);
	offset = gpa & (psize - 1);
	if (nb_ret)
		*nb_ret = psize - offset;
	return page_address(page) + offset;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
	struct page *page = virt_to_page(va);

	page = compound_head(page);
	put_page(page);
}

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}