/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		smp_mb();
		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			continue;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE: hard-disable EE and replay any interrupt that
		   arrived while it was only soft-disabled */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}

		trace_hardirqs_on();
#endif

		kvm_guest_enter();

		/* Going into guest context! Yay! */
		vcpu->mode = IN_GUEST_MODE;
		smp_wmb();

		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

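/*
 * Handle a KVM paravirtual hypercall.  The guest passes the hypercall
 * number in r11 and up to four arguments in r3-r6 (truncated to 32 bits
 * when MSR_SF is clear); the secondary return value is written back to
 * r4 here, and the primary status is handed to the caller, which
 * conventionally places it in r3.
 */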
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

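/*
 * Re-run the instruction emulator for an access that trapped, and map
 * the emulation result onto a resume action: keep executing the guest,
 * exit to userspace to carry out MMIO, or give up and resume the host
 * when emulation fails.
 */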
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;

	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
               struct kvm_userspace_memory_region *mem,
               struct kvm_memory_slot old,
               int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
	vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

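/*
 * Complete an MMIO load once userspace has filled in run->mmio.data:
 * the buffer holds the bytes in the order of the guest access, so they
 * are byte-swapped here for little-endian accesses (the host is
 * big-endian), optionally sign-extended, and routed to the register
 * named by io_gpr.
 */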
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert LE data from userland to host byte order. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

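/*
 * Set up an MMIO store exit to userspace: serialize the value into
 * run->mmio.data in the byte order of the guest access, fill in the
 * kvm_run MMIO block, and return EMULATE_DO_MMIO so the run loop drops
 * back to the host.
 */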
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
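	/*
	 * 0x44000022 is "sc 1" (system call with LEV = 1), which traps
	 * directly to the hypervisor on embedded-HV parts, so a single
	 * instruction suffices and the remaining slots are padded with
	 * nops.
	 */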
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		struct kvm *kvm = filp->private_data;
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}

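/*
 * A minimal bitmap allocator for hardware logical partition IDs
 * (LPIDs).  kvmppc_init_lpid() caps the pool at what the platform
 * supports, and kvmppc_claim_lpid() marks an LPID as in use without
 * allocating it (e.g. one already reserved for the host).
 */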
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}