/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include "book3s.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

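/*
 * Kick the vcpu: wake its wait queue if it is sleeping, and if it is
 * currently running in the guest on another CPU, send that CPU an IPI
 * so the vcpu exits the guest promptly.
 */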
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();

	/* CPU points to the first thread of the core */
	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
		int real_cpu = cpu + vcpu->arch.ptid;
		if (paca[real_cpu].kvm_hstate.xics_phys)
			xics_wake_cpu(real_cpu);
		else if (cpu_online(cpu))
			smp_send_reschedule(cpu);
	}
	put_cpu();
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the arch.tbacct_lock
 * of the vcpu that has taken responsibility for running the vcore
 * (i.e. vc->runner).  The stolen times are measured in units of
 * timebase ticks.  (Note that the != TB_NIL checks below are
 * purely defensive; they should never fail.)
 */

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

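/*
 * Set the virtual core's architecture compatibility mode from a logical
 * PVR value, along with the matching PCR setting.  Returns -EINVAL for
 * values we can't support.
 */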
int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long pcr = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (arch_compat) {
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return -EINVAL;	/* 970 has no compat mode support */

		switch (arch_compat) {
		case PVR_ARCH_205:
			pcr = PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			break;
		default:
			return -EINVAL;
		}
	}

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	vc->pcr = pcr;
	spin_unlock(&vc->lock);

	return 0;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr  = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = 1;
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		u16 hword;
		u32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

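/*
 * Handle the H_REGISTER_VPA hcall: register or deregister a VPA, SLB
 * shadow buffer or dispatch trace log for the target vcpu.  The new
 * area is only recorded here; it gets pinned by kvmppc_update_vpas()
 * before the vcpu next enters the guest.
 */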
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = ((struct reg_vpa *)va)->length.hword;
		else
			len = ((struct reg_vpa *)va)->length.word;
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;

	/*
	 * If we are the task running the vcore, then since we hold
	 * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
	 * can't be updated, so we don't need the tbacct_lock.
	 * If the vcore is inactive, it can't become active (since we
	 * hold the vcore lock), so the vcpu load/put functions won't
	 * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
	 */
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->runner->arch.run_task != current) {
		spin_lock_irq(&vc->runner->arch.tbacct_lock);
		p = vc->stolen_tb;
		if (vc->preempt_tb != TB_NIL)
			p += now - vc->preempt_tb;
		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
	} else {
		p = vc->stolen_tb;
	}
	return p;
}

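/*
 * Write an entry into the guest's dispatch trace log, if one is
 * registered, recording the stolen time accumulated since the vcpu
 * was last dispatched.
 */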
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
	dt->timebase = now + vc->tb_offset;
	dt->enqueue_to_dispatch_time = stolen;
	dt->srr0 = kvmppc_get_pc(vcpu);
	dt->srr1 = vcpu->arch.shregs.msr;
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = ++vcpu->arch.dtl_index;
	vcpu->arch.dtl.dirty = true;
}

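/*
 * Handle the hypercalls we emulate in the kernel (H_ENTER, H_CEDE,
 * H_PROD, H_CONFER, H_REGISTER_VPA, H_RTAS and the XICS calls).
 * Anything else is passed up to userspace by returning RESUME_HOST.
 */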
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	switch (req) {
	case H_ENTER:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		kvm_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;

	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		} /* fallthrough */
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

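/*
 * Handle an exit from the guest: queue any interrupt that needs to be
 * delivered and decide whether we can go straight back into the guest
 * (RESUME_GUEST), need to handle a page fault (RESUME_PAGE_FAULT) or
 * must return to userspace (RESUME_HOST).
 */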
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				 struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/*
		 * Deliver a machine check interrupt to the guest.
		 * We have to do this, even if the host has handled the
		 * machine check, because machine checks use SRR0/1 and
		 * the interrupt might have trashed guest state in them.
		 */
		kvmppc_book3s_queue_irqprio(vcpu,
					    BOOK3S_INTERRUPT_MACHINE_CHECK);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr_hv(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	spin_lock(&vc->lock);
	/*
	 * Userspace can only modify DPFD (default prefetch depth),
	 * ILE (interrupt little-endian) and TC (translation control).
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
}

static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
	case KVM_REG_PPC_SIAR:
		*val = get_reg_val(id, vcpu->arch.siar);
		break;
	case KVM_REG_PPC_SDAR:
		*val = get_reg_val(id, vcpu->arch.sdar);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			val->vsxval[0] = vcpu->arch.vsr[2 * i];
			val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
		break;
	case KVM_REG_PPC_LPCR:
		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
		break;
	case KVM_REG_PPC_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr);
		break;
	case KVM_REG_PPC_ARCH_COMPAT:
		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIAR:
		vcpu->arch.siar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SDAR:
		vcpu->arch.sdar = set_reg_val(id, *val);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			vcpu->arch.vsr[2 * i] = val->vsxval[0];
			vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		addr = set_reg_val(id, *val);
		r = -EINVAL;
		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
			      vcpu->arch.dtl.next_gpa))
			break;
		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
		break;
	case KVM_REG_PPC_VPA_SLB:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && !vcpu->arch.vpa.next_gpa)
			break;
		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
		break;
	case KVM_REG_PPC_VPA_DTL:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && (len < sizeof(struct dtl_entry) ||
			     !vcpu->arch.vpa.next_gpa))
			break;
		len -= len % sizeof(struct dtl_entry);
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		/* round up to multiple of 2^24 */
		vcpu->arch.vcore->tb_offset =
			ALIGN(set_reg_val(id, *val), 1UL << 24);
		break;
	case KVM_REG_PPC_LPCR:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
		break;
	case KVM_REG_PPC_PPR:
		vcpu->arch.ppr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_ARCH_COMPAT:
		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
						   unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
	spin_lock_init(&vcpu->arch.vpa_update_lock);
	spin_lock_init(&vcpu->arch.tbacct_lock);
	vcpu->arch.busy_preempt = TB_NIL;

	kvmppc_mmu_book3s_hv_init(vcpu);

	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
			vcore->preempt_tb = TB_NIL;
			vcore->lpcr = kvm->arch.lpcr;
		}
		kvm->arch.vcores[core] = vcore;
		kvm->arch.online_vcores++;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
	if (vpa->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
					vpa->dirty);
}

static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
{
	/* Indicate we want to get back into the guest */
	return 1;
}

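/*
 * Arm a hrtimer to wake the vcpu when its decrementer is due to expire;
 * if it has already expired, queue the decrementer interrupt right away.
 */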
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	u64 now;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	now = mftb();
	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
		vcpu->arch.stolen_logged;
	vcpu->arch.busy_preempt = now;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	--vc->n_runnable;
	list_del(&vcpu->arch.run_list);
}

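/*
 * Grab a secondary hardware thread of this core for KVM's use, making
 * sure it stays in (or returns to) nap mode rather than running in the
 * host kernel.
 */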
static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 1000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;
	tpaca->kvm_hstate.kvm_vcpu = NULL;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}

static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}

static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}

static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.  Then grab the threads so they can't
 * enter the kernel.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;

	/* Grab all hw threads so they can't go into the kernel */
	for (thr = 1; thr < threads_per_core; ++thr) {
		if (kvmppc_grab_hwthread(cpu + thr)) {
			/* Couldn't grab one; let the others go */
			do {
				kvmppc_release_hwthread(cpu + thr);
			} while (--thr > 0);
			return 0;
		}
	}
	return 1;
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static void kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid, i, need_vpa_update;
	int srcu_idx;
	struct kvm_vcpu *vcpus_to_update[threads_per_core];

	/* don't start if any threads have a signal pending */
	need_vpa_update = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (signal_pending(vcpu->arch.run_task))
			return;
		if (vcpu->arch.vpa.update_pending ||
		    vcpu->arch.slb_shadow.update_pending ||
		    vcpu->arch.dtl.update_pending)
			vcpus_to_update[need_vpa_update++] = vcpu;
	}

	/*
	 * Initialize *vc, in particular vc->vcore_state, so we can
	 * drop the vcore lock if necessary.
	 */
	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_STARTING;
	vc->in_guest = 0;
	vc->napping_threads = 0;

	/*
	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
	 * which can't be called with any spinlocks held.
	 */
	if (need_vpa_update) {
		spin_unlock(&vc->lock);
		for (i = 0; i < need_vpa_update; ++i)
			kvmppc_update_vpas(vcpus_to_update[i]);
		spin_lock(&vc->lock);
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		goto out;	/* nothing to run; should never happen */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	vc->pcpu = smp_processor_id();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
	}

	vc->vcore_state = VCORE_RUNNING;
	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();

	srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);

	__kvmppc_vcore_entry(NULL, vcpu0);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	for (i = 0; i < threads_per_core; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	spin_lock(&vc->lock);
	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
						    vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

 out:
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}
}

/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}

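/*
 * Add this vcpu to its virtual core's runnable list and either run the
 * core ourselves or let another vcpu task run it, looping until the
 * vcpu needs the host's attention or a signal is pending.
 */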
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	kvmppc_update_vpas(vcpu);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (!signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_create_dtl_entry(vcpu, vc);
			kvmppc_start_thread(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
			wake_up(&vc->wq);
		}

	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
			break;
		vc->runner = vcpu;
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
			if (!v->arch.pending_exceptions)
				n_ceded += v->arch.ceded;
			else
				v->arch.ceded = 0;
		}
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);
		vc->runner = NULL;
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       (vc->vcore_state == VCORE_RUNNING ||
		vc->vcore_state == VCORE_EXITING)) {
		spin_unlock(&vc->lock);
		kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
		spin_lock(&vc->lock);
	}

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		kvmppc_remove_runnable(vc, vcpu);
		vcpu->stat.signal_exits++;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		vcpu->arch.ret = -EINTR;
	}

	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
		/* Wake up some vcpu to run the core */
		v = list_first_entry(&vc->runnable_threads,
				     struct kvm_vcpu, arch.run_list);
		wake_up(&v->arch.cpu_run);
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}

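/*
 * Main entry point for running a vcpu: do the one-time HPT/RMA setup if
 * needed, then keep re-entering the guest, handling hypercalls and page
 * faults in the host, until we have to go back to userspace.
 */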
static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;
	int srcu_idx;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	atomic_inc(&vcpu->kvm->arch.vcpus_running);
	/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
	smp_mb();

	/* On the first time here, set up HTAB and VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_htab_rma(vcpu);
		if (r)
			goto out;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
		} else if (r == RESUME_PAGE_FAULT) {
			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		}
	} while (r == RESUME_GUEST);

 out:
	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
	atomic_dec(&vcpu->kvm->arch.vcpus_running);
	return r;
}


/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}

static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;
	struct kvm_rma_info *ri = vma->vm_file->private_data;

	if (vmf->pgoff >= kvm_rma_pages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvm_rma_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static const struct file_operations kvm_rma_fops = {
	.mmap           = kvm_rma_mmap,
	.release	= kvm_rma_release,
};

static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
				      struct kvm_allocate_rma *ret)
{
	long fd;
	struct kvm_rma_info *ri;
	/*
	 * Only do this on PPC970 in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_201))
		return -EINVAL;

	if (!kvm_rma_pages)
		return -EINVAL;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
	if (fd < 0)
		kvm_release_rma(ri);

	ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
	return fd;
}

static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
				     int linux_psize)
{
	struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];

	if (!def->shift)
		return;
	(*sps)->page_shift = def->shift;
	(*sps)->slb_enc = def->sllp;
	(*sps)->enc[0].page_shift = def->shift;
	/*
	 * Only return base page encoding. We don't want to return
	 * all the supporting pte_enc, because our H_ENTER doesn't
	 * support MPSS yet. Once they do, we can start passing all
	 * support pte_enc here
	 */
	(*sps)->enc[0].pte_enc = def->penc[linux_psize];
	(*sps)++;
}

static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	struct kvm_ppc_one_seg_page_size *sps;

	info->flags = KVM_PPC_PAGE_SIZES_REAL;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		info->flags |= KVM_PPC_1T_SEGMENTS;
	info->slb_size = mmu_slb_size;

	/* We only support these sizes for now, and no multi-size segments */
	sps = &info->sps[0];
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);

	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
	if (r)
		goto out;

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void unpin_slot(struct kvm_memory_slot *memslot)
{
	unsigned long *physp;
	unsigned long j, npages, pfn;
	struct page *page;

	physp = memslot->arch.slot_phys;
	npages = memslot->npages;
	if (!physp)
		return;
	for (j = 0; j < npages; j++) {
		if (!(physp[j] & KVMPPC_GOT_PAGE))
			continue;
		pfn = physp[j] >> PAGE_SHIFT;
		page = pfn_to_page(pfn);
		SetPageDirty(page);
		put_page(page);
	}
}

static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
	if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
		unpin_slot(free);
		vfree(free->arch.slot_phys);
		free->arch.slot_phys = NULL;
	}
}

static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;
	slot->arch.slot_phys = NULL;

	return 0;
}

static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					struct kvm_userspace_memory_region *mem)
{
	unsigned long *phys;

	/* Allocate a slot_phys array if needed */
	phys = memslot->arch.slot_phys;
	if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
		phys = vzalloc(memslot->npages * sizeof(unsigned long));
		if (!phys)
			return -ENOMEM;
		memslot->arch.slot_phys = phys;
	}

	return 0;
}

static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	if (npages && old->npages) {
		/*
		 * If modifying a memslot, reset all the rmap dirty bits.
		 * If this is a new memslot, we don't need to do anything
		 * since the rmap array starts out as all zeroes,
		 * i.e. no pages are dirty.
		 */
		memslot = id_to_memslot(kvm->memslots, mem->slot);
		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
	}
}

/*
 * Update LPCR values in kvm->arch and in vcores.
 * Caller must hold kvm->lock.
 */
void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
{
	long int i;
	u32 cores_done = 0;

	if ((kvm->arch.lpcr & mask) == lpcr)
		return;

	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;

	for (i = 0; i < KVM_MAX_VCORES; ++i) {
		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
		if (!vc)
			continue;
		spin_lock(&vc->lock);
		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
		spin_unlock(&vc->lock);
		if (++cores_done >= kvm->arch.online_vcores)
			break;
	}
}

static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
{
	return;
}

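/*
 * One-time setup of the guest's hashed page table and real-mode area,
 * done when the first vcpu enters the guest: use the VRMA on POWER7,
 * or a preallocated RMA region on PPC970.
 */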
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_rma_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr = 0, senc;
	unsigned long lpcr_mask = 0;
	unsigned long psize, porder;
	unsigned long rma_size;
	unsigned long rmls;
	unsigned long *physp;
	unsigned long i, npages;
	int srcu_idx;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Allocate hashed page table (if not done already) and reset it */
	if (!kvm->arch.hpt_virt) {
		err = kvmppc_alloc_hpt(kvm, NULL);
		if (err) {
			pr_err("KVM: Couldn't alloc HPT\n");
			goto out;
		}
	}

	/* Look up the memslot for guest physical address 0 */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out_srcu;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);

	if (!ri) {
		/* On POWER7, use VRMA; on PPC970, give up */
		err = -EPERM;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("KVM: CPU requires an RMO\n");
			goto out_srcu;
		}

		/* We can handle 4k, 64k or 16M pages in the VRMA */
		err = -EINVAL;
		if (!(psize == 0x1000 || psize == 0x10000 ||
		      psize == 0x1000000))
			goto out_srcu;

		/* Update VRMASD field in the LPCR */
		senc = slb_pgsize_encoding(psize);
		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
		lpcr_mask = LPCR_VRMASD;
		/* the -4 is to account for senc values starting at 0x10 */
		lpcr = senc << (LPCR_VRMASD_SH - 4);

		/* Create HPTEs in the hash page table for the VRMA */
		kvmppc_map_vrma(vcpu, memslot, porder);

	} else {
		/* Set up to use an RMO region */
		rma_size = kvm_rma_pages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
		rmls = lpcr_rmls(rma_size);
		err = -EINVAL;
		if ((long)rmls < 0) {
			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
			goto out_srcu;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;

		/* Update LPCR and RMOR */
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr_mask = (1ul << HID4_RMLS0_SH) |
				(3ul << HID4_RMLS2_SH) | HID4_RMOR;
			lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
			lpcr = rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
		}
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = kvm_rma_pages;
		porder = __ilog2(npages);
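		/*
		 * Each slot_phys entry holds the real address of the page,
		 * with the page order stored in the low-order bits.
		 */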
		physp = memslot->arch.slot_phys;
		if (physp) {
			if (npages > memslot->npages)
				npages = memslot->npages;
			spin_lock(&kvm->arch.slot_phys_lock);
			for (i = 0; i < npages; ++i)
				physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
					porder;
			spin_unlock(&kvm->arch.slot_phys_lock);
		}
	}

	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = 1;
	err = 0;
 out_srcu:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out_srcu;
}

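/*
 * VM creation: allocate an LPID, make sure every core flushes the TLB
 * before first running this LPID, and derive the guest LPCR (or HID4 on
 * PPC970) from the host values.
 */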
static int kvmppc_core_init_vm_hv(struct kvm *kvm)
{
	unsigned long lpcr, lpid;

	/* Allocate the guest's logical partition ID */

	lpid = kvmppc_alloc_lpid();
	if ((long)lpid < 0)
		return -ENOMEM;
	kvm->arch.lpid = lpid;

	/*
	 * Since we don't flush the TLB when tearing down a VM,
	 * and this lpid might have previously been used,
	 * make sure we flush on each core before running the new VM.
	 */
	cpumask_setall(&kvm->arch.need_tlb_flush);

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VPM1;
		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
	}
	kvm->arch.lpcr = lpcr;

	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
	spin_lock_init(&kvm->arch.slot_phys_lock);

	/*
	 * Don't allow secondary CPU threads to come online
	 * while any KVM VMs exist.
	 */
	inhibit_secondary_onlining();

	return 0;
}

static void kvmppc_free_vcores(struct kvm *kvm)
{
	long int i;

	for (i = 0; i < KVM_MAX_VCORES; ++i)
		kfree(kvm->arch.vcores[i]);
	kvm->arch.online_vcores = 0;
}

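/*
 * Tear the VM down: re-enable secondary thread onlining, free the virtual
 * cores, release any RMA region, and free the HPT.
 */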
static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
	uninhibit_secondary_onlining();

	kvmppc_free_vcores(kvm);
	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
}

/* We don't need to emulate any privileged instructions or dcbz */
static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				     unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
					ulong spr_val)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
					ulong *spr_val)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_check_processor_compat_hv(void)
{
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EIO;
	return 0;
}

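/*
 * HV-specific VM ioctls: RMA allocation (KVM_ALLOCATE_RMA), HPT
 * allocation/reset (KVM_PPC_ALLOCATE_HTAB) and the HPT file descriptor
 * (KVM_PPC_GET_HTAB_FD).
 */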
static long kvm_arch_vm_ioctl_hv(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {

	case KVM_ALLOCATE_RMA: {
		struct kvm_allocate_rma rma;
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}

	case KVM_PPC_GET_HTAB_FD: {
		struct kvm_get_htab_fd ghf;

		r = -EFAULT;
		if (copy_from_user(&ghf, argp, sizeof(ghf)))
			break;
		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
		break;
	}

	default:
		r = -ENOTTY;
	}

	return r;
}

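/*
 * Ops table for HV-mode KVM; installed as kvmppc_hv_ops when the module
 * is initialised.
 */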
static struct kvmppc_ops kvm_ops_hv = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
	.get_one_reg = kvmppc_get_one_reg_hv,
	.set_one_reg = kvmppc_set_one_reg_hv,
	.vcpu_load   = kvmppc_core_vcpu_load_hv,
	.vcpu_put    = kvmppc_core_vcpu_put_hv,
	.set_msr     = kvmppc_set_msr_hv,
	.vcpu_run    = kvmppc_vcpu_run_hv,
	.vcpu_create = kvmppc_core_vcpu_create_hv,
	.vcpu_free   = kvmppc_core_vcpu_free_hv,
	.check_requests = kvmppc_core_check_requests_hv,
	.get_dirty_log  = kvm_vm_ioctl_get_dirty_log_hv,
	.flush_memslot  = kvmppc_core_flush_memslot_hv,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
	.commit_memory_region  = kvmppc_core_commit_memory_region_hv,
	.unmap_hva = kvm_unmap_hva_hv,
	.unmap_hva_range = kvm_unmap_hva_range_hv,
	.age_hva  = kvm_age_hva_hv,
	.test_age_hva = kvm_test_age_hva_hv,
	.set_spte_hva = kvm_set_spte_hva_hv,
	.mmu_destroy  = kvmppc_mmu_destroy_hv,
	.free_memslot = kvmppc_core_free_memslot_hv,
	.create_memslot = kvmppc_core_create_memslot_hv,
	.init_vm =  kvmppc_core_init_vm_hv,
	.destroy_vm = kvmppc_core_destroy_vm_hv,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
	.emulate_op = kvmppc_core_emulate_op_hv,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
	.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
	.arch_vm_ioctl  = kvm_arch_vm_ioctl_hv,
};

static int kvmppc_book3s_init_hv(void)
{
	int r;
	/*
	 * FIXME!! Do we need to check on all CPUs?
	 */
	r = kvmppc_core_check_processor_compat_hv();
	if (r < 0)
		return r;

	kvm_ops_hv.owner = THIS_MODULE;
	kvmppc_hv_ops = &kvm_ops_hv;

	r = kvmppc_mmu_hv_init();
	return r;
}

static void kvmppc_book3s_exit_hv(void)
{
	kvmppc_hv_ops = NULL;
}

module_init(kvmppc_book3s_init_hv);
module_exit(kvmppc_book3s_exit_hv);
MODULE_LICENSE("GPL");