booke.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
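
/*
 * Illustrative sketch, not code from this file: the generic KVM debugfs
 * code consumes these entries by adding the recorded offset to the
 * appropriate base pointer, roughly
 *
 *	u32 *counter = (u32 *)((void *)vcpu + debugfs_entries[i].offset);
 *
 * with KVM_STAT_VM vs. KVM_STAT_VCPU selecting whether the base is the
 * struct kvm or the struct kvm_vcpu. The real plumbing lives in
 * virt/kvm/kvm_main.c.
 */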

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* BookE does flags in ESR, so ignore those we get here */
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask;

	switch (priority) {
	case BOOKE_IRQPRIO_PROGRAM:
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.msr & MSR_EE;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

	if (allowed) {
		vcpu->arch.srr0 = vcpu->arch.pc;
		vcpu->arch.srr1 = vcpu->arch.msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	/* __ffs() is undefined when no bits are set, so bail out early. */
	if (!*pending)
		return;

	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}
}
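
/*
 * Illustrative usage, a sketch rather than code from this file: queueing
 * an interrupt only sets a bit in pending_exceptions; nothing reaches the
 * guest until the next call here, and then only if the guest MSR permits
 * that priority:
 *
 *	kvmppc_core_queue_dec(vcpu);            (sets the pending bit)
 *	kvmppc_core_deliver_interrupts(vcpu);   (delivers iff MSR[EE] is set)
 */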

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
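/*
 * For example (illustrative): a signal exit below returns
 * (-EINTR << 2) | RESUME_HOST, which a caller can decode as
 *
 *	if (r & RESUME_FLAG_HOST) {
 *		int errcode = r >> 2;	(here -EINTR)
 *		(return errcode to userspace)
 *	}
 */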
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
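
		/*
		 * Illustrative userspace-side decode, not part of this file:
		 * the all-ones tag in the top half marks the low half as the
		 * failing opcode:
		 *
		 *	if ((run->hw.hardware_exit_reason >> 32) == 0xffffffff)
		 *		printf("bad inst: %08x\n",
		 *		       (u32)run->hw.hardware_exit_reason);
		 */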
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		vcpu->arch.dear = vcpu->arch.fault_dear;
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
			vcpu->arch.dear = vcpu->arch.fault_dear;
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = 0;
	vcpu->arch.msr = 0;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching number so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR. */
	vcpu->arch.ivpr = 0x55550000;

	kvmppc_init_timing_stats(vcpu);

	return kvmppc_core_vcpu_setup(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. Hardware
	 * uses only the top 16 bits of IVPR, so the handlers must sit in a
	 * 64KB-aligned region; hence the 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;
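
	/*
	 * Sketch of the alignment requirement (illustrative): delivery in
	 * kvmppc_booke_irqprio_deliver() composes the guest vector as
	 * (ivpr | ivor[n]), and the hardware likewise uses only the top 16
	 * bits of IVPR, so the handler block's low 16 address bits must be
	 * zero:
	 *
	 *	new_pc = (kvmppc_booke_handlers & 0xffff0000) | ivor[i];
	 */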

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}