/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

#define OP_31_XOP_TBEGIN	654

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin
enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
88
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
89 90 91 92 93
		return false;

	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.cr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.cr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

#endif

int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
137 138
{
	int emulated = EMULATE_DONE;
139 140 141 142
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
143
	u32 inst_sc = 0x44000002;
144 145

	switch (get_op(inst)) {
146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up in
			 * illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
162 163 164
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * add rules to fit in ISA specification regarding TM
			 * state transistion in TM disable/Suspended state,
			 * and target TM state is TM inactive(00) state. (the
			 * change should be suppressed).
			 */
			if (((cur_msr & MSR_TM) == 0) &&
				((srr1 & MSR_TM) == 0) &&
				MSR_TM_SUSPENDED(cur_msr) &&
				!MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
182
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
183
			kvmppc_set_msr(vcpu, srr1);
184 185
			*advance = 0;
			break;
186
		}
187 188 189 190 191 192 193 194 195

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
196
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
197 198 199
			break;
		case OP_31_XOP_MTMSRD:
		{
200
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
201
			if (inst & 0x10000) {
202
				ulong new_msr = kvmppc_get_msr(vcpu);
203 204
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
205
				kvmppc_set_msr_fast(vcpu, new_msr);
206
			} else
207
				kvmppc_set_msr(vcpu, rs_val);
208 209 210
			break;
		}
		case OP_31_XOP_MTMSR:
211
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
212
			break;
213 214 215 216 217 218 219 220
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
221
				kvmppc_set_gpr(vcpu, rt, sr);
222 223 224
			}
			break;
		}
225 226 227 228
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

229
			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
230 231 232
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
233
				kvmppc_set_gpr(vcpu, rt, sr);
234 235 236
			}
			break;
		}
237 238 239
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
240
				kvmppc_get_gpr(vcpu, rs));
241
			break;
242 243
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
244 245
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
246 247 248 249 250
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
251
			ulong addr = kvmppc_get_gpr(vcpu, rb);
252 253 254
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
255
#ifdef CONFIG_PPC_BOOK3S_64
256 257 258 259 260 261
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

262
		        if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
263 264 265 266 267 268 269 270 271 272 273 274 275 276
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

277 278
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
279
			emulated = EMULATE_EXIT_USER;
280 281 282
			break;
		}
#endif
283 284 285 286 287 288
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

289
			vcpu->arch.mmu.slbmte(vcpu,
290 291
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
292 293 294 295 296
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

297
			vcpu->arch.mmu.slbie(vcpu,
298
					kvmppc_get_gpr(vcpu, rb));
299 300 301 302 303 304 305 306 307 308 309
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
310
				ulong t, rb_val;
311

312 313 314
				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
315 316 317 318 319 320
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
321
				ulong t, rb_val;
322

323 324 325
				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
326 327
			}
			break;
328 329 330
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
331 332
		case OP_31_XOP_DCBZ:
		{
333 334
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
335
			ulong addr, vaddr;
336
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
A
Alexander Graf 已提交
337 338
			u32 dsisr;
			int r;
339

340 341
			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
342

343
			addr = (ra_val + rb_val) & ~31ULL;
344
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
345
				addr &= 0xffffffff;
346
			vaddr = addr;
347

A
Alexander Graf 已提交
348 349 350
			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
351
				kvmppc_set_dar(vcpu, vaddr);
352
				vcpu->arch.fault_dar = vaddr;
A
Alexander Graf 已提交
353 354 355 356 357 358 359

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

360
				kvmppc_set_dsisr(vcpu, dsisr);
361
				vcpu->arch.fault_dsisr = dsisr;
A
Alexander Graf 已提交
362

363 364 365 366 367 368
				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
#endif
406 407 408 409 410 411 412 413
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

414 415 416
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

417 418 419
	return emulated;
}

void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
                    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
A
Alexander Graf 已提交
441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

462
	return bat;
463 464
}

int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
466 467 468 469 470
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
471 472
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
473
		to_book3s(vcpu)->sdr1 = spr_val;
474 475
		break;
	case SPRN_DSISR:
476
		kvmppc_set_dsisr(vcpu, spr_val);
477 478
		break;
	case SPRN_DAR:
479
		kvmppc_set_dar(vcpu, spr_val);
480 481
		break;
	case SPRN_HIOR:
482
		to_book3s(vcpu)->hior = spr_val;
483 484 485 486 487
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
488 489 490 491
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
492 493 494
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
A
Alexander Graf 已提交
495
		kvmppc_mmu_flush_segments(vcpu);
496
		break;
497
	}
498
	case SPRN_HID0:
499
		to_book3s(vcpu)->hid[0] = spr_val;
500 501
		break;
	case SPRN_HID1:
502
		to_book3s(vcpu)->hid[1] = spr_val;
503 504
		break;
	case SPRN_HID2:
505
		to_book3s(vcpu)->hid[2] = spr_val;
506
		break;
A
Alexander Graf 已提交
507 508 509 510 511 512 513 514 515 516 517 518
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
519 520 521 522
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
A
Alexander Graf 已提交
523 524 525 526 527 528 529 530
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
531
	case SPRN_HID4:
A
Alexander Graf 已提交
532
	case SPRN_HID4_GEKKO:
533
		to_book3s(vcpu)->hid[4] = spr_val;
534 535
		break;
	case SPRN_HID5:
536
		to_book3s(vcpu)->hid[5] = spr_val;
537 538 539 540 541
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
A
Alexander Graf 已提交
542 543 544 545 546 547 548 549 550 551
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
552
#ifdef CONFIG_PPC_BOOK3S_64
553
	case SPRN_FSCR:
554
		kvmppc_set_fscr(vcpu, spr_val);
555
		break;
556 557 558 559 560 561 562 563 564
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
565 566 567 568
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
			!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
					(sprn == SPRN_TFHAR))) {
			/* it is illegal to mtspr() TM regs in
			 * other than non-transactional state, with
			 * the exception of TFHAR in suspend state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

599 600
		break;
#endif
601
#endif
602 603 604 605 606 607
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
A
Alexander Graf 已提交
608
	case SPRN_L2CR:
609
	case SPRN_DSCR:
A
Alexander Graf 已提交
610 611 612 613 614 615 616
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
617
	case SPRN_MSSSR0:
618
	case SPRN_DABR:
619 620 621 622 623 624
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
625
	case SPRN_UMMCR2:
626
#endif
627
		break;
628
unprivileged:
629
	default:
630 631 632 633 634 635 636 637 638 639 640 641
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
642 643 644 645 646 647
		break;
	}

	return emulated;
}

int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
649 650 651 652
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
A
Alexander Graf 已提交
653 654 655 656
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
657 658 659 660
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
661
			*spr_val = bat->raw >> 32;
662
		else
663
			*spr_val = bat->raw;
664

A
Alexander Graf 已提交
665
		break;
666
	}
667
	case SPRN_SDR1:
668 669
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
670
		*spr_val = to_book3s(vcpu)->sdr1;
671 672
		break;
	case SPRN_DSISR:
673
		*spr_val = kvmppc_get_dsisr(vcpu);
674 675
		break;
	case SPRN_DAR:
676
		*spr_val = kvmppc_get_dar(vcpu);
677 678
		break;
	case SPRN_HIOR:
679
		*spr_val = to_book3s(vcpu)->hior;
680 681
		break;
	case SPRN_HID0:
682
		*spr_val = to_book3s(vcpu)->hid[0];
683 684
		break;
	case SPRN_HID1:
685
		*spr_val = to_book3s(vcpu)->hid[1];
686 687
		break;
	case SPRN_HID2:
A
Alexander Graf 已提交
688
	case SPRN_HID2_GEKKO:
689
		*spr_val = to_book3s(vcpu)->hid[2];
690 691
		break;
	case SPRN_HID4:
A
Alexander Graf 已提交
692
	case SPRN_HID4_GEKKO:
693
		*spr_val = to_book3s(vcpu)->hid[4];
694 695
		break;
	case SPRN_HID5:
696
		*spr_val = to_book3s(vcpu)->hid[5];
697
		break;
698
	case SPRN_CFAR:
699
	case SPRN_DSCR:
700
		*spr_val = 0;
701
		break;
702
	case SPRN_PURR:
703 704 705 706
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
707 708
		break;
	case SPRN_SPURR:
709 710 711 712
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
713
		break;
714
	case SPRN_VTB:
715
		*spr_val = to_book3s(vcpu)->vtb;
716
		break;
717 718 719
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
A
Alexander Graf 已提交
720 721 722 723 724 725 726 727
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
728
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
A
Alexander Graf 已提交
729
		break;
730
#ifdef CONFIG_PPC_BOOK3S_64
731 732 733
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
734 735 736 737 738 739 740 741 742
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
743 744 745 746
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
764 765
		break;
#endif
766
#endif
767 768 769 770 771
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
A
Alexander Graf 已提交
772 773 774 775 776 777 778 779
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
780
	case SPRN_MSSSR0:
781
	case SPRN_DABR:
782 783 784 785 786 787
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
788
	case SPRN_UMMCR2:
789
	case SPRN_TIR:
790
#endif
791
		*spr_val = 0;
792 793
		break;
	default:
794
unprivileged:
795 796 797 798 799 800 801 802 803 804 805 806 807 808
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

809 810 811 812 813 814
		break;
	}

	return emulated;
}

u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
817
	return make_dsisr(inst);
818 819 820 821
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
822 823 824 825 826 827
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so can we
	 */
	return vcpu->arch.fault_dar;
#else
828
	ulong dar = 0;
829 830
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);
831 832 833 834 835 836 837 838 839 840 841 842 843

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
844
		dar += kvmppc_get_gpr(vcpu, rb);
845 846 847 848 849 850 851
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
852
#endif
853
}