/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
38
{
A
Alexander Graf 已提交
39
	unsigned long dec_nsec;
40
	unsigned long long dec_time;
A
Alexander Graf 已提交
41

A
Alexander Graf 已提交
42
	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
43 44
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

45
#ifdef CONFIG_PPC_BOOK3S
46 47 48
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

49 50 51 52 53 54
	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif
55 56 57 58 59 60 61 62 63 64 65 66 67 68

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
69 70 71 72 73 74
	/*
	 * Guest timebase ticks at the same frequency as host decrementer.
	 * So use the host decrementer calculations for decrementer emulation.
	 */
	dec_time = dec_time << decrementer_clockevent.shift;
	do_div(dec_time, decrementer_clockevent.mult);
75 76 77 78
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	vcpu->arch.dec_jiffies = get_tb();
79 80
}

S
Scott Wood 已提交
81 82 83
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
	u64 jd = tb - vcpu->arch.dec_jiffies;
84 85 86 87 88 89

#ifdef CONFIG_BOOKE
	if (vcpu->arch.dec < jd)
		return 0;
#endif

S
Scott Wood 已提交
90 91 92
	return vcpu->arch.dec - jd;
}

93 94 95 96 97 98 99
static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_SRR0:
100
		kvmppc_set_srr0(vcpu, spr_val);
101 102
		break;
	case SPRN_SRR1:
103
		kvmppc_set_srr1(vcpu, spr_val);
104 105 106 107 108 109 110 111 112 113 114 115 116
		break;

	/* XXX We need to context-switch the timebase for
	 * watchdog and FIT. */
	case SPRN_TBWL: break;
	case SPRN_TBWU: break;

	case SPRN_DEC:
		vcpu->arch.dec = spr_val;
		kvmppc_emulate_dec(vcpu);
		break;

	case SPRN_SPRG0:
117
		kvmppc_set_sprg0(vcpu, spr_val);
118 119
		break;
	case SPRN_SPRG1:
120
		kvmppc_set_sprg1(vcpu, spr_val);
121 122
		break;
	case SPRN_SPRG2:
123
		kvmppc_set_sprg2(vcpu, spr_val);
124 125
		break;
	case SPRN_SPRG3:
126
		kvmppc_set_sprg3(vcpu, spr_val);
127 128
		break;

A
Alexander Graf 已提交
129 130 131
	/* PIR can legally be written, but we ignore it */
	case SPRN_PIR: break;

132
	default:
133 134
		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
								  spr_val);
135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152
		if (emulated == EMULATE_FAIL)
			printk(KERN_INFO "mtspr: unknown spr "
				"0x%x\n", sprn);
		break;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);

	return emulated;
}

static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = 0;

	switch (sprn) {
	case SPRN_SRR0:
153
		spr_val = kvmppc_get_srr0(vcpu);
154 155
		break;
	case SPRN_SRR1:
156
		spr_val = kvmppc_get_srr1(vcpu);
157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175
		break;
	case SPRN_PVR:
		spr_val = vcpu->arch.pvr;
		break;
	case SPRN_PIR:
		spr_val = vcpu->vcpu_id;
		break;

	/* Note: mftb and TBRL/TBWL are user-accessible, so
	 * the guest can always access the real TB anyways.
	 * In fact, we probably will never see these traps. */
	case SPRN_TBWL:
		spr_val = get_tb() >> 32;
		break;
	case SPRN_TBWU:
		spr_val = get_tb();
		break;

	case SPRN_SPRG0:
176
		spr_val = kvmppc_get_sprg0(vcpu);
177 178
		break;
	case SPRN_SPRG1:
179
		spr_val = kvmppc_get_sprg1(vcpu);
180 181
		break;
	case SPRN_SPRG2:
182
		spr_val = kvmppc_get_sprg2(vcpu);
183 184
		break;
	case SPRN_SPRG3:
185
		spr_val = kvmppc_get_sprg3(vcpu);
186 187 188 189 190 191 192 193
		break;
	/* Note: SPRG4-7 are user-readable, so we don't get
	 * a trap. */

	case SPRN_DEC:
		spr_val = kvmppc_get_dec(vcpu, get_tb());
		break;
	default:
194 195
		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
								  &spr_val);
196 197 198 199 200 201 202 203 204 205 206 207 208 209
		if (unlikely(emulated == EMULATE_FAIL)) {
			printk(KERN_INFO "mfspr: unknown spr "
				"0x%x\n", sprn);
		}
		break;
	}

	if (emulated == EMULATE_DONE)
		kvmppc_set_gpr(vcpu, rt, spr_val);
	kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);

	return emulated;
}

/* XXX to do:
 * lswx
 * lswi
 * stswx
 * stswi
 * lmw
 * stmw
 *
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
227 228 229
	u32 inst;
	int ra, rs, rt, sprn;
	enum emulation_result emulated;
230 231
	int advance = 1;

232 233 234
	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

235 236 237 238
	emulated = kvmppc_get_last_inst(vcpu, false, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

239
	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
240

241 242 243 244 245
	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);
	sprn = get_sprn(inst);

246
	switch (get_op(inst)) {
247
	case OP_TRAP:
248
#ifdef CONFIG_PPC_BOOK3S
249
	case OP_TRAP_64:
250
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
251
#else
252 253
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
254
#endif
255 256 257 258 259 260
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

261 262 263 264 265 266 267 268 269 270 271 272
		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;
273
		case OP_31_XOP_LWZX:
274 275 276
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

277
		case OP_31_XOP_LBZX:
278 279 280
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

281 282
		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
283
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
284 285
			break;

286
		case OP_31_XOP_STWX:
287
			emulated = kvmppc_handle_store(run, vcpu,
288
						       kvmppc_get_gpr(vcpu, rs),
289 290 291
			                               4, 1);
			break;

292
		case OP_31_XOP_STBX:
293
			emulated = kvmppc_handle_store(run, vcpu,
294
						       kvmppc_get_gpr(vcpu, rs),
295 296 297
			                               1, 1);
			break;

298
		case OP_31_XOP_STBUX:
299
			emulated = kvmppc_handle_store(run, vcpu,
300
						       kvmppc_get_gpr(vcpu, rs),
301
			                               1, 1);
A
Alexander Graf 已提交
302
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
303 304
			break;

305 306 307 308
		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

309
		case OP_31_XOP_LHZX:
310 311 312
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

313
		case OP_31_XOP_LHZUX:
314
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
315
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
316 317
			break;

318
		case OP_31_XOP_MFSPR:
319
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
320 321
			break;

322
		case OP_31_XOP_STHX:
323
			emulated = kvmppc_handle_store(run, vcpu,
324
						       kvmppc_get_gpr(vcpu, rs),
325 326 327
			                               2, 1);
			break;

328
		case OP_31_XOP_STHUX:
329
			emulated = kvmppc_handle_store(run, vcpu,
330
						       kvmppc_get_gpr(vcpu, rs),
331
			                               2, 1);
332
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
333 334
			break;

335
		case OP_31_XOP_MTSPR:
336
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
337 338
			break;

S
Stuart Yoder 已提交
339
		case OP_31_XOP_DCBST:
A
Alexander Graf 已提交
340
		case OP_31_XOP_DCBF:
341
		case OP_31_XOP_DCBI:
342 343 344 345 346 347 348
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

349
		case OP_31_XOP_LWBRX:
350 351 352
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

353
		case OP_31_XOP_TLBSYNC:
354 355
			break;

356
		case OP_31_XOP_STWBRX:
357
			emulated = kvmppc_handle_store(run, vcpu,
358
						       kvmppc_get_gpr(vcpu, rs),
359 360 361
			                               4, 0);
			break;

362
		case OP_31_XOP_LHBRX:
363 364 365
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

366
		case OP_31_XOP_STHBRX:
367
			emulated = kvmppc_handle_store(run, vcpu,
368
						       kvmppc_get_gpr(vcpu, rs),
369 370 371 372
			                               2, 0);
			break;

		default:
373
			/* Attempt core-specific emulation below. */
374 375 376 377
			emulated = EMULATE_FAIL;
		}
		break;

378
	case OP_LWZ:
379 380 381
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

382 383 384 385 386 387
	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

388
	case OP_LWZU:
389
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
390
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
391 392
		break;

393
	case OP_LBZ:
394 395 396
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

397
	case OP_LBZU:
398
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
399
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
400 401
		break;

402
	case OP_STW:
403 404
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
405 406 407
		                               4, 1);
		break;

408 409 410 411 412 413 414 415
	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               8, 1);
		break;

416
	case OP_STWU:
417 418
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
419
		                               4, 1);
420
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
421 422
		break;

423
	case OP_STB:
424 425
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
426 427 428
		                               1, 1);
		break;

429
	case OP_STBU:
430 431
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
432
		                               1, 1);
433
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
434 435
		break;

436
	case OP_LHZ:
437 438 439
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

440
	case OP_LHZU:
441
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
442
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
443 444
		break;

A
Alexander Graf 已提交
445 446 447 448 449 450
	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
451
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
A
Alexander Graf 已提交
452 453
		break;

454
	case OP_STH:
455 456
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
457 458 459
		                               2, 1);
		break;

460
	case OP_STHU:
461 462
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
463
		                               2, 1);
464
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
465 466 467 468
		break;

	default:
		emulated = EMULATE_FAIL;
469 470 471
	}

	if (emulated == EMULATE_FAIL) {
472 473
		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
							       &advance);
474 475 476
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
477 478 479
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
480
			kvmppc_core_queue_program(vcpu, 0);
481
		}
482 483
	}

484
	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
485

486
	/* Advance past emulated instruction. */
487
	if (advance)
488
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
489 490 491

	return emulated;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);