/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_STH  44
#define OP_STHU 45

#ifdef CONFIG_PPC64
/* On 64-bit the guest decrementer interrupt is treated as always
 * enabled; there is no guest-controlled gate checked here. */
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
	return 1;
}
#else
/* On 32-bit (BookE-style) cores the guest's TCR[DIE] bit gates
 * decrementer interrupts; nonzero means the guest enabled them. */
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.tcr & TCR_DIE;
}
#endif

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
81
{
A
Alexander Graf 已提交
82
	unsigned long dec_nsec;
A
Alexander Graf 已提交
83

A
Alexander Graf 已提交
84
	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
85 86 87
#ifdef CONFIG_PPC64
	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
A
Alexander Graf 已提交
88
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
89 90 91 92 93
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif
	if (kvmppc_dec_enabled(vcpu)) {
94 95 96 97
		/* The decrementer ticks at the same rate as the timebase, so
		 * that's how we convert the guest DEC value to the number of
		 * host ticks. */

A
Alexander Graf 已提交
98 99 100 101 102 103
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		dec_nsec = vcpu->arch.dec;
		dec_nsec *= 1000;
		dec_nsec /= tb_ticks_per_usec;
		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
			      HRTIMER_MODE_REL);
104
		vcpu->arch.dec_jiffies = get_tb();
105
	} else {
A
Alexander Graf 已提交
106
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123
	}
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = vcpu->arch.last_inst;
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

138 139 140
	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

141 142
	pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

143
	switch (get_op(inst)) {
144
	case OP_TRAP:
145 146 147
#ifdef CONFIG_PPC64
	case OP_TRAP_64:
#else
148
		vcpu->arch.esr |= ESR_PTR;
149
#endif
150
		kvmppc_core_queue_program(vcpu);
151 152 153 154 155 156
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

157
		case OP_31_XOP_LWZX:
158 159 160 161
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

162
		case OP_31_XOP_LBZX:
163 164 165 166
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

167
		case OP_31_XOP_STWX:
168 169 170 171 172 173
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               4, 1);
			break;

174
		case OP_31_XOP_STBX:
175 176 177 178 179 180
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               1, 1);
			break;

181
		case OP_31_XOP_STBUX:
182 183 184 185 186 187 188 189 190 191 192 193 194 195
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               1, 1);
			vcpu->arch.gpr[rs] = ea;
			break;

196
		case OP_31_XOP_LHZX:
197 198 199 200
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

201
		case OP_31_XOP_LHZUX:
202 203 204 205 206 207 208 209 210 211 212 213
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

214
		case OP_31_XOP_MFSPR:
215 216 217 218 219 220 221 222 223
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
			case SPRN_SRR1:
				vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
			case SPRN_PVR:
224
				vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
225
			case SPRN_PIR:
226 227 228
				vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
			case SPRN_MSSSR0:
				vcpu->arch.gpr[rt] = 0; break;
229 230 231 232 233

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
234
				vcpu->arch.gpr[rt] = get_tb() >> 32; break;
235
			case SPRN_TBWU:
236
				vcpu->arch.gpr[rt] = get_tb(); break;
237 238 239 240 241 242 243 244 245 246 247 248

			case SPRN_SPRG0:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
			case SPRN_SPRG1:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
			case SPRN_SPRG2:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
			case SPRN_SPRG3:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

A
Alexander Graf 已提交
249 250
			case SPRN_DEC:
			{
251
				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
A
Alexander Graf 已提交
252
				vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
253
				pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
A
Alexander Graf 已提交
254 255
				break;
			}
256
			default:
257 258 259 260 261
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk("mfspr: unknown spr %x\n", sprn);
					vcpu->arch.gpr[rt] = 0;
				}
262 263 264 265
				break;
			}
			break;

266
		case OP_31_XOP_STHX:
267 268 269 270 271 272 273 274 275
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               2, 1);
			break;

276
		case OP_31_XOP_STHUX:
277 278 279 280 281 282 283 284 285 286 287 288 289 290
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

291
		case OP_31_XOP_MTSPR:
292 293 294 295 296 297 298 299 300 301 302 303 304
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

305 306
			case SPRN_MSSSR0: break;

307 308 309 310 311 312 313 314 315 316 317 318 319 320 321
			case SPRN_DEC:
				vcpu->arch.dec = vcpu->arch.gpr[rs];
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;

			default:
322 323 324
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk("mtspr: unknown spr %x\n", sprn);
325 326 327 328
				break;
			}
			break;

329
		case OP_31_XOP_DCBI:
330 331 332 333 334 335 336
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

337
		case OP_31_XOP_LWBRX:
338 339 340 341
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

342
		case OP_31_XOP_TLBSYNC:
343 344
			break;

345
		case OP_31_XOP_STWBRX:
346 347 348 349 350 351 352 353 354
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               4, 0);
			break;

355
		case OP_31_XOP_LHBRX:
356 357 358 359
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

360
		case OP_31_XOP_STHBRX:
361 362 363 364 365 366 367 368 369 370
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
			                               vcpu->arch.gpr[rs],
			                               2, 0);
			break;

		default:
371
			/* Attempt core-specific emulation below. */
372 373 374 375
			emulated = EMULATE_FAIL;
		}
		break;

376
	case OP_LWZ:
377 378 379 380
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

381
	case OP_LWZU:
382 383 384 385 386 387
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

388
	case OP_LBZ:
389 390 391 392
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

393
	case OP_LBZU:
394 395 396 397 398 399
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

400
	case OP_STW:
401 402 403 404 405
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               4, 1);
		break;

406
	case OP_STWU:
407 408 409 410 411 412 413
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

414
	case OP_STB:
415 416 417 418 419
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               1, 1);
		break;

420
	case OP_STBU:
421 422 423 424 425 426 427
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

428
	case OP_LHZ:
429 430 431 432
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

433
	case OP_LHZU:
434 435 436 437 438 439
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

440
	case OP_STH:
441 442 443 444 445
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               2, 1);
		break;

446
	case OP_STHU:
447 448 449 450 451 452 453 454 455
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
		                               2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	default:
		emulated = EMULATE_FAIL;
456 457 458 459 460 461 462 463 464
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
		}
465 466
	}

467
	trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);
468

469 470 471 472 473
	if (advance)
		vcpu->arch.pc += 4; /* Advance past emulated instruction. */

	return emulated;
}