/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

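/*
 * Emulate a guest write to DEC: convert the new value from timebase
 * ticks to nanoseconds and (re)arm the host hrtimer that models the
 * guest decrementer interrupt.
 */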
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
	/*
	 * Guest timebase ticks at the same frequency as host decrementer.
	 * So use the host decrementer calculations for decrementer emulation.
	 */
	dec_time = dec_time << decrementer_clockevent.shift;
	do_div(dec_time, decrementer_clockevent.mult);
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	vcpu->arch.dec_jiffies = get_tb();
}

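/*
 * Compute the current guest DEC value from the timebase ticks elapsed
 * since the last write to DEC (dec_jiffies holds the timebase value at
 * that write, see kvmppc_emulate_dec() above).
 */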
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
	u64 jd = tb - vcpu->arch.dec_jiffies;

#ifdef CONFIG_BOOKE
	if (vcpu->arch.dec < jd)
		return 0;
#endif

	return vcpu->arch.dec - jd;
}

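/*
 * Emulate mtspr: SPRs common to all cores are handled here; anything
 * else is deferred to the core-specific kvm_ops->emulate_mtspr hook.
 */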
static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_SRR0:
		kvmppc_set_srr0(vcpu, spr_val);
		break;
	case SPRN_SRR1:
		kvmppc_set_srr1(vcpu, spr_val);
		break;

	/* XXX We need to context-switch the timebase for
	 * watchdog and FIT. */
	case SPRN_TBWL: break;
	case SPRN_TBWU: break;

	case SPRN_DEC:
		vcpu->arch.dec = spr_val;
		kvmppc_emulate_dec(vcpu);
		break;

	case SPRN_SPRG0:
		kvmppc_set_sprg0(vcpu, spr_val);
		break;
	case SPRN_SPRG1:
		kvmppc_set_sprg1(vcpu, spr_val);
		break;
	case SPRN_SPRG2:
		kvmppc_set_sprg2(vcpu, spr_val);
		break;
	case SPRN_SPRG3:
		kvmppc_set_sprg3(vcpu, spr_val);
		break;

	/* PIR can legally be written, but we ignore it */
	case SPRN_PIR: break;

	default:
		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
								  spr_val);
		if (emulated == EMULATE_FAIL)
			printk(KERN_INFO "mtspr: unknown spr "
				"0x%x\n", sprn);
		break;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);

	return emulated;
}

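/*
 * Emulate mfspr: SPRs common to all cores are read here; anything else
 * is deferred to the core-specific kvm_ops->emulate_mfspr hook.
 */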
static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = 0;

	switch (sprn) {
	case SPRN_SRR0:
		spr_val = kvmppc_get_srr0(vcpu);
		break;
	case SPRN_SRR1:
		spr_val = kvmppc_get_srr1(vcpu);
		break;
	case SPRN_PVR:
		spr_val = vcpu->arch.pvr;
		break;
	case SPRN_PIR:
		spr_val = vcpu->vcpu_id;
		break;

	/* Note: mftb and TBRL/TBWL are user-accessible, so
	 * the guest can always access the real TB anyways.
	 * In fact, we probably will never see these traps. */
	case SPRN_TBWL:
		spr_val = get_tb() >> 32;
		break;
	case SPRN_TBWU:
		spr_val = get_tb();
		break;

	case SPRN_SPRG0:
		spr_val = kvmppc_get_sprg0(vcpu);
		break;
	case SPRN_SPRG1:
		spr_val = kvmppc_get_sprg1(vcpu);
		break;
	case SPRN_SPRG2:
		spr_val = kvmppc_get_sprg2(vcpu);
		break;
	case SPRN_SPRG3:
		spr_val = kvmppc_get_sprg3(vcpu);
		break;
	/* Note: SPRG4-7 are user-readable, so we don't get
	 * a trap. */

	case SPRN_DEC:
		spr_val = kvmppc_get_dec(vcpu, get_tb());
		break;
	default:
		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
								  &spr_val);
		if (unlikely(emulated == EMULATE_FAIL)) {
			printk(KERN_INFO "mfspr: unknown spr "
				"0x%x\n", sprn);
		}
		break;
	}

	if (emulated == EMULATE_DONE)
		kvmppc_set_gpr(vcpu, rt, spr_val);
	kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);

	return emulated;
}

/* XXX still to do (not emulated below):
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lmw
 * stmw
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
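/*
 * Emulate one trapped guest instruction.  Loads and stores are routed
 * to the MMIO handlers, mtspr/mfspr to the helpers above; anything
 * unrecognized is offered to the core-specific emulate_op hook, and a
 * program check is queued if that fails as well.
 */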
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

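		/* X-form (opcode 31) instructions: traps, loads/stores, SPR
		 * access and cache ops.  The update forms (..UX) also write
		 * the effective address back to rA. */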
		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

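	/* D-form loads and stores: access size and sign extension are implied
	 * by the primary opcode; the update ('U') forms also write the
	 * effective address back to rA. */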
	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
							       &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);