/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>

#include "booke.h"
#include "44x_tlb.h"

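/* Page-aligned region holding KVM's private copies of the booke exception
 * handlers; allocated and populated in kvmppc_booke_init(). */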
unsigned long kvmppc_booke_handlers;

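/* debugfs statistics: each entry names a counter and records its offset
 * within struct kvm or struct kvm_vcpu. */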
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gpr[i],
		       vcpu->arch.gpr[i+1],
		       vcpu->arch.gpr[i+2],
		       vcpu->arch.gpr[i+3]);
	}
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
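	/* All external interrupts share a single guest priority, so the irq
	 * argument is not needed here. */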
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
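	/* 'allowed' becomes nonzero if the guest's current MSR permits taking
	 * this class of interrupt; 'msr_mask' selects the MSR bits that remain
	 * set when the guest enters its handler. */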
	int allowed = 0;
	ulong msr_mask;

	switch (priority) {
	case BOOKE_IRQPRIO_PROGRAM:
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.msr & MSR_EE;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

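	/* Deliver: save PC/MSR into SRR0/SRR1, vector through IVPR|IVOR, and
	 * clear the MSR bits not covered by msr_mask. */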
	if (allowed) {
		vcpu->arch.srr0 = vcpu->arch.pc;
		vcpu->arch.srr1 = vcpu->arch.msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

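	/* Walk the pending bitmap from the lowest bit upwards and stop at the
	 * first exception that can actually be delivered. */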
	priority = __ffs(*pending);
	while (priority <= BOOKE_MAX_INTERRUPT) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */

		/* XXX At this point the TLB still holds our shadow TLB, so if
		 * we do reschedule the host will fault over it. Perhaps we
		 * should politely restore the host's entries to minimize
		 * misses before ceding control. */
		vcpu->stat.dec_exits++;
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
			r = RESUME_GUEST;
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			vcpu->stat.emulated_inst_exits++;
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
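			/* Device Control Register access: forward it to
			 * userspace for emulation. */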
			run->exit_reason = KVM_EXIT_DCR;
			vcpu->stat.dcr_exits++;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		vcpu->arch.dear = vcpu->arch.fault_dear;
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
		vcpu->stat.dsi_exits++;
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
		vcpu->stat.isi_exits++;
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		vcpu->stat.syscall_exits++;
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		struct kvmppc_44x_tlbe *gtlbe;
		unsigned long eaddr = vcpu->arch.fault_dear;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
		if (!gtlbe) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
			vcpu->arch.dear = vcpu->arch.fault_dear;
			vcpu->arch.esr = vcpu->arch.fault_esr;
			vcpu->stat.dtlb_real_miss_exits++;
			r = RESUME_GUEST;
			break;
		}

		vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
		gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
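			/* Install a shadow TLB mapping for this page so the
			 * guest's access can complete without another exit. */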
			kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid,
			               gtlbe->word2, get_tlb_bytes(gtlbe));
			vcpu->stat.dtlb_virt_miss_exits++;
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			r = kvmppc_emulate_mmio(run, vcpu);
			vcpu->stat.mmio_exits++;
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		struct kvmppc_44x_tlbe *gtlbe;
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
		if (!gtlbe) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			vcpu->stat.itlb_real_miss_exits++;
			break;
		}

		vcpu->stat.itlb_virt_miss_exits++;

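		/* Translate the faulting PC through the guest TLB entry to a
		 * guest physical address. */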
		gpaddr = tlb_xlate(gtlbe, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
			               gtlbe->word2, get_tlb_bytes(gtlbe));
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			vcpu->stat.signal_exits++;
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = 0;
	vcpu->arch.msr = 0;
	vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching number so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR. */
	vcpu->arch.ivpr = 0x55550000;

	return kvmppc_core_vcpu_setup(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = vcpu->arch.cr;
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = vcpu->arch.xer;
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = vcpu->arch.gpr[i];

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	vcpu->arch.cr = regs->cr;
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	vcpu->arch.xer = regs->xer;
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
		vcpu->arch.gpr[i] = regs->gpr[i];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}

int kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR holds
	 * only the upper 16 address bits, so the handler region must be 64KB
	 * in size and 64KB aligned. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
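	/* The handlers were copied via the data cache; flush so instruction
	 * fetches see the new code. */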
	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}