/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
36
#define OP_31_XOP_MTSR		210
37 38 39
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
40 41
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
42 43 44
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
45
#define OP_31_XOP_MFSR		595
46
#define OP_31_XOP_MFSRIN	659
47
#define OP_31_XOP_DCBA		758
48 49 50 51
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

52 53
#define OP_31_XOP_TBEGIN	654

54 55
#define OP_31_XOP_TRECLAIM	942

56 57 58
/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

59 60 61 62 63
#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

A
Alexander Graf 已提交
64 65 66 67 68 69 70 71 72
#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

73 74 75 76
/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

77 78 79 80 81 82 83 84 85 86 87 88 89
enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
90
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
91 92 93 94 95
		return false;

	return true;
}

96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.cr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.cr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174
static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	preempt_disable();
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	tm_enable();
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/* failure recording depends on Failure Summary bit */
	if (!(vcpu->arch.texasr & TEXASR_FS)) {
		vcpu->arch.texasr &= ~TEXASR_FC;
		vcpu->arch.texasr |= ((u64)fc_val << TEXASR_FC_LG);

		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, vcpu->arch.texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	/*
	 * treclaim need quit to non-transactional state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);
	preempt_enable();
}
175 176
#endif

177 178
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
179 180
{
	int emulated = EMULATE_DONE;
181 182 183 184
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
185
	u32 inst_sc = 0x44000002;
186 187

	switch (get_op(inst)) {
188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up in
			 * illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
204 205 206
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * add rules to fit in ISA specification regarding TM
			 * state transistion in TM disable/Suspended state,
			 * and target TM state is TM inactive(00) state. (the
			 * change should be suppressed).
			 */
			if (((cur_msr & MSR_TM) == 0) &&
				((srr1 & MSR_TM) == 0) &&
				MSR_TM_SUSPENDED(cur_msr) &&
				!MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
224
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
225
			kvmppc_set_msr(vcpu, srr1);
226 227
			*advance = 0;
			break;
228
		}
229 230 231 232 233 234 235 236 237

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
238
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
239 240 241
			break;
		case OP_31_XOP_MTMSRD:
		{
242
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
243
			if (inst & 0x10000) {
244
				ulong new_msr = kvmppc_get_msr(vcpu);
245 246
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
247
				kvmppc_set_msr_fast(vcpu, new_msr);
248
			} else
249
				kvmppc_set_msr(vcpu, rs_val);
250 251 252
			break;
		}
		case OP_31_XOP_MTMSR:
253
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
254
			break;
255 256 257 258 259 260 261 262
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
263
				kvmppc_set_gpr(vcpu, rt, sr);
264 265 266
			}
			break;
		}
267 268 269 270
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

271
			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
272 273 274
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
275
				kvmppc_set_gpr(vcpu, rt, sr);
276 277 278
			}
			break;
		}
279 280 281
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
282
				kvmppc_get_gpr(vcpu, rs));
283
			break;
284 285
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
286 287
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
288 289 290 291 292
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
293
			ulong addr = kvmppc_get_gpr(vcpu, rb);
294 295 296
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
297
#ifdef CONFIG_PPC_BOOK3S_64
298 299 300 301 302 303
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

304
		        if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
305 306 307 308 309 310 311 312 313 314 315 316 317 318
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

319 320
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
321
			emulated = EMULATE_EXIT_USER;
322 323 324
			break;
		}
#endif
325 326 327 328 329 330
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

331
			vcpu->arch.mmu.slbmte(vcpu,
332 333
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
334 335 336 337 338
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

339
			vcpu->arch.mmu.slbie(vcpu,
340
					kvmppc_get_gpr(vcpu, rb));
341 342 343 344 345 346 347 348 349 350 351
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
352
				ulong t, rb_val;
353

354 355 356
				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
357 358 359 360 361 362
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
363
				ulong t, rb_val;
364

365 366 367
				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
368 369
			}
			break;
370 371 372
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
373 374
		case OP_31_XOP_DCBZ:
		{
375 376
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
377
			ulong addr, vaddr;
378
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
A
Alexander Graf 已提交
379 380
			u32 dsisr;
			int r;
381

382 383
			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
384

385
			addr = (ra_val + rb_val) & ~31ULL;
386
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
387
				addr &= 0xffffffff;
388
			vaddr = addr;
389

A
Alexander Graf 已提交
390 391 392
			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
393
				kvmppc_set_dar(vcpu, vaddr);
394
				vcpu->arch.fault_dar = vaddr;
A
Alexander Graf 已提交
395 396 397 398 399 400 401

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

402
				kvmppc_set_dsisr(vcpu, dsisr);
403
				vcpu->arch.fault_dsisr = dsisr;
A
Alexander Graf 已提交
404

405 406 407 408 409 410
				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
481
#endif
482 483 484 485 486 487 488 489
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

490 491 492
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

493 494 495
	return emulated;
}

496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
                    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

516
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
A
Alexander Graf 已提交
517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

538
	return bat;
539 540
}

541
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
542 543 544 545 546
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
547 548
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
549
		to_book3s(vcpu)->sdr1 = spr_val;
550 551
		break;
	case SPRN_DSISR:
552
		kvmppc_set_dsisr(vcpu, spr_val);
553 554
		break;
	case SPRN_DAR:
555
		kvmppc_set_dar(vcpu, spr_val);
556 557
		break;
	case SPRN_HIOR:
558
		to_book3s(vcpu)->hior = spr_val;
559 560 561 562 563
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
564 565 566 567
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
568 569 570
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
A
Alexander Graf 已提交
571
		kvmppc_mmu_flush_segments(vcpu);
572
		break;
573
	}
574
	case SPRN_HID0:
575
		to_book3s(vcpu)->hid[0] = spr_val;
576 577
		break;
	case SPRN_HID1:
578
		to_book3s(vcpu)->hid[1] = spr_val;
579 580
		break;
	case SPRN_HID2:
581
		to_book3s(vcpu)->hid[2] = spr_val;
582
		break;
A
Alexander Graf 已提交
583 584 585 586 587 588 589 590 591 592 593 594
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
595 596 597 598
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
A
Alexander Graf 已提交
599 600 601 602 603 604 605 606
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
607
	case SPRN_HID4:
A
Alexander Graf 已提交
608
	case SPRN_HID4_GEKKO:
609
		to_book3s(vcpu)->hid[4] = spr_val;
610 611
		break;
	case SPRN_HID5:
612
		to_book3s(vcpu)->hid[5] = spr_val;
613 614 615 616 617
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
A
Alexander Graf 已提交
618 619 620 621 622 623 624 625 626 627
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
628
#ifdef CONFIG_PPC_BOOK3S_64
629
	case SPRN_FSCR:
630
		kvmppc_set_fscr(vcpu, spr_val);
631
		break;
632 633 634 635 636 637 638 639 640
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
641 642 643 644
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
			!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
					(sprn == SPRN_TFHAR))) {
			/* it is illegal to mtspr() TM regs in
			 * other than non-transactional state, with
			 * the exception of TFHAR in suspend state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

675 676
		break;
#endif
677
#endif
678 679 680 681 682 683
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
A
Alexander Graf 已提交
684
	case SPRN_L2CR:
685
	case SPRN_DSCR:
A
Alexander Graf 已提交
686 687 688 689 690 691 692
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
693
	case SPRN_MSSSR0:
694
	case SPRN_DABR:
695 696 697 698 699 700
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
701
	case SPRN_UMMCR2:
702
#endif
703
		break;
704
unprivileged:
705
	default:
706 707 708 709 710 711 712 713 714 715 716 717
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
718 719 720 721 722 723
		break;
	}

	return emulated;
}

724
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
725 726 727 728
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
A
Alexander Graf 已提交
729 730 731 732
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
733 734 735 736
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
737
			*spr_val = bat->raw >> 32;
738
		else
739
			*spr_val = bat->raw;
740

A
Alexander Graf 已提交
741
		break;
742
	}
743
	case SPRN_SDR1:
744 745
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
746
		*spr_val = to_book3s(vcpu)->sdr1;
747 748
		break;
	case SPRN_DSISR:
749
		*spr_val = kvmppc_get_dsisr(vcpu);
750 751
		break;
	case SPRN_DAR:
752
		*spr_val = kvmppc_get_dar(vcpu);
753 754
		break;
	case SPRN_HIOR:
755
		*spr_val = to_book3s(vcpu)->hior;
756 757
		break;
	case SPRN_HID0:
758
		*spr_val = to_book3s(vcpu)->hid[0];
759 760
		break;
	case SPRN_HID1:
761
		*spr_val = to_book3s(vcpu)->hid[1];
762 763
		break;
	case SPRN_HID2:
A
Alexander Graf 已提交
764
	case SPRN_HID2_GEKKO:
765
		*spr_val = to_book3s(vcpu)->hid[2];
766 767
		break;
	case SPRN_HID4:
A
Alexander Graf 已提交
768
	case SPRN_HID4_GEKKO:
769
		*spr_val = to_book3s(vcpu)->hid[4];
770 771
		break;
	case SPRN_HID5:
772
		*spr_val = to_book3s(vcpu)->hid[5];
773
		break;
774
	case SPRN_CFAR:
775
	case SPRN_DSCR:
776
		*spr_val = 0;
777
		break;
778
	case SPRN_PURR:
779 780 781 782
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
783 784
		break;
	case SPRN_SPURR:
785 786 787 788
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
789
		break;
790
	case SPRN_VTB:
791
		*spr_val = to_book3s(vcpu)->vtb;
792
		break;
793 794 795
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
A
Alexander Graf 已提交
796 797 798 799 800 801 802 803
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
804
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
A
Alexander Graf 已提交
805
		break;
806
#ifdef CONFIG_PPC_BOOK3S_64
807 808 809
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
810 811 812 813 814 815 816 817 818
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
819 820 821 822
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
840 841
		break;
#endif
842
#endif
843 844 845 846 847
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
A
Alexander Graf 已提交
848 849 850 851 852 853 854 855
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
856
	case SPRN_MSSSR0:
857
	case SPRN_DABR:
858 859 860 861 862 863
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
864
	case SPRN_UMMCR2:
865
	case SPRN_TIR:
866
#endif
867
		*spr_val = 0;
868 869
		break;
	default:
870
unprivileged:
871 872 873 874 875 876 877 878 879 880 881 882 883 884
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

885 886 887 888 889 890
		break;
	}

	return emulated;
}

891 892
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
893
	return make_dsisr(inst);
894 895 896 897
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
898 899 900 901 902 903
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so can we
	 */
	return vcpu->arch.fault_dar;
#else
904
	ulong dar = 0;
905 906
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);
907 908 909 910 911 912 913 914 915 916 917 918 919

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
920
		dar += kvmppc_get_gpr(vcpu, rb);
921 922 923 924 925 926 927
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
928
#endif
929
}