/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_TRAP      4
#define OP_31_XOP_LWZX      23
#define OP_31_XOP_TRAP_64   68
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45

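/*
 * Reprogram the host hrtimer that emulates the guest decrementer.  Called
 * when the guest writes SPRN_DEC (see the mtspr emulation below).
 */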
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
	/*
	 * Guest timebase ticks at the same frequency as host decrementer.
	 * So use the host decrementer calculations for decrementer emulation.
	 */
	dec_time = dec_time << decrementer_clockevent.shift;
	do_div(dec_time, decrementer_clockevent.mult);
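	/*
	 * dec_time is now in nanoseconds; do_div() leaves the whole seconds
	 * in dec_time and returns the sub-second remainder for ktime_set().
	 */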
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	vcpu->arch.dec_jiffies = get_tb();
}

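/*
 * Return the DEC value the guest should currently observe: the value it
 * last wrote minus the timebase ticks elapsed since then.  On BOOKE the
 * emulated decrementer stops at zero rather than going negative.
 */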
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
	u64 jd = tb - vcpu->arch.dec_jiffies;

#ifdef CONFIG_BOOKE
	if (vcpu->arch.dec < jd)
		return 0;
#endif

	return vcpu->arch.dec - jd;
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;
	ulong spr_val = 0;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

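	/*
	 * Primary opcode 31 groups many instructions; the extended opcode
	 * (xop) field selects the actual operation.
	 */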
	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

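		/*
		 * Update-form accesses also write the effective address that
		 * was used back into rA.
		 */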
		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

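		/*
		 * mfspr: read the emulated SPR into spr_val; it is copied to
		 * rT after the switch.  Unknown SPRs go to the core-specific
		 * handler.
		 */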
		case OP_31_XOP_MFSPR:
			switch (sprn) {
			case SPRN_SRR0:
				spr_val = vcpu->arch.shared->srr0;
				break;
			case SPRN_SRR1:
				spr_val = vcpu->arch.shared->srr1;
				break;
			case SPRN_PVR:
				spr_val = vcpu->arch.pvr;
				break;
			case SPRN_PIR:
				spr_val = vcpu->vcpu_id;
				break;
			case SPRN_MSSSR0:
				spr_val = 0;
				break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				spr_val = get_tb() >> 32;
				break;
			case SPRN_TBWU:
				spr_val = get_tb();
				break;

			case SPRN_SPRG0:
				spr_val = vcpu->arch.shared->sprg0;
				break;
			case SPRN_SPRG1:
				spr_val = vcpu->arch.shared->sprg1;
				break;
			case SPRN_SPRG2:
				spr_val = vcpu->arch.shared->sprg2;
				break;
			case SPRN_SPRG3:
				spr_val = vcpu->arch.shared->sprg3;
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
				spr_val = kvmppc_get_dec(vcpu, get_tb());
				break;
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
								     &spr_val);
				if (unlikely(emulated == EMULATE_FAIL)) {
					printk(KERN_INFO "mfspr: unknown spr "
						"0x%x\n", sprn);
				}
				break;
			}
			kvmppc_set_gpr(vcpu, rt, spr_val);
			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

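		/*
		 * mtspr: take the new value from rS and update the emulated
		 * SPR.  Unknown SPRs go to the core-specific handler.
		 */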
		case OP_31_XOP_MTSPR:
			spr_val = kvmppc_get_gpr(vcpu, rs);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.shared->srr0 = spr_val;
				break;
			case SPRN_SRR1:
				vcpu->arch.shared->srr1 = spr_val;
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = spr_val;
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.shared->sprg0 = spr_val;
				break;
			case SPRN_SPRG1:
				vcpu->arch.shared->sprg1 = spr_val;
				break;
			case SPRN_SPRG2:
				vcpu->arch.shared->sprg2 = spr_val;
				break;
			case SPRN_SPRG3:
				vcpu->arch.shared->sprg3 = spr_val;
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
								     spr_val);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO "mtspr: unknown spr "
						"0x%x\n", sprn);
				break;
			}
			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

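		/*
		 * The byte-reversed accesses (lwbrx etc.) pass 0 rather than 1
		 * as the final byte-order argument to the MMIO handlers.
		 */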
		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

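	/*
	 * The remaining opcodes are the D-form loads and stores; the update
	 * forms write the effective address back into rA as above.
	 */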
	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

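	/*
	 * Anything not handled above is passed to the core-specific emulation
	 * hook; if that also fails, the guest gets a program interrupt.
	 */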
	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}