/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
}


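/*
 * Emulate one guest instruction and map the emulation result onto a
 * resume code: RESUME_GUEST_NV to re-enter the guest, RESUME_HOST_NV to
 * exit to userspace so it can complete the MMIO access, or RESUME_HOST
 * when emulation failed.  The _NV variants force the non-volatile
 * registers to be reloaded before resuming.
 */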
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       vcpu->arch.last_inst);
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

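/* Allocate the bare struct kvm; common KVM code performs the rest of the
 * VM setup. */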
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
}

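/* Free every vcpu, then reset the online vcpu bookkeeping under kvm->lock. */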
static void kvmppc_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

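/*
 * Report which optional KVM capabilities this architecture supports;
 * anything not listed here is reported as unsupported.
 */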
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_ENABLE_CAP:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
               struct kvm_userspace_memory_region *mem,
               struct kvm_memory_slot old,
               int user_alloc)
{
       return;
}


void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu))
		kvmppc_create_vcpu_debugfs(vcpu, id);
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

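/* Tasklet body: inject a decrementer exception and wake a sleeping vcpu. */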
static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

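/*
 * Complete an MMIO load on re-entry from userspace: pull the value out of
 * run->mmio.data, fix up endianness and sign extension, and write it into
 * the GPR/FPR/QPR that was recorded when the load was issued.
 */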
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 gpr;

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
	case KVM_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
	case KVM_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
	case KVM_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
	default:
		BUG();
	}
}

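/*
 * Prepare run->mmio for a guest load that userspace must satisfy.  The
 * destination register and access size are remembered in the vcpu so that
 * kvmppc_complete_mmio_load() can finish the load on the next KVM_RUN.
 */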
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

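/*
 * Prepare run->mmio for a guest store: the value is copied into
 * run->mmio.data in the access's byte order and the exit is flagged as an
 * MMIO write for userspace to carry out.
 */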
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

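/*
 * KVM_RUN: complete any MMIO or DCR load left over from the previous
 * exit, deliver pending interrupts, then enter the guest with host
 * interrupts disabled.
 */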
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	return r;
}

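/*
 * KVM_INTERRUPT: KVM_INTERRUPT_UNSET clears the pending external
 * interrupt, any other value queues one.  A vcpu sleeping on its wait
 * queue is woken so it can notice the change.
 */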
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET)
		kvmppc_core_dequeue_external(vcpu, irq);
	else
		kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

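/*
 * KVM_ENABLE_CAP: no optional capabilities are wired up yet, so every
 * request is rejected with -EINVAL.
 */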
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}