/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

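/*
 * Facility checks: if the guest tried to use FP, VSX or Altivec while
 * the corresponding MSR bit is clear, queue the matching
 * facility-unavailable interrupt for the guest instead of emulating
 * the access, so the guest can enable the facility and re-execute.
 */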
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);
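	/*
	 * These raw instruction fields feed the hand-decoded fallback
	 * path at the bottom of this function, for the few instructions
	 * that analyse_instr() does not handle.
	 */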

	/*
	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory
	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
	 * VSR[32..63] and memory
	 */
	vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	vcpu->arch.regs.ccr = vcpu->arch.cr;
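	/*
	 * analyse_instr() from lib/sstep.c decodes the instruction
	 * against a copy of the register state; MSR and CR are copied
	 * in first because the decode depends on them (e.g. MSR_LE for
	 * endianness). A return value of 0 means the decoded operation
	 * in 'op' must still be carried out by the caller, which is the
	 * MMIO case here.
	 */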
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD:  {
			int instr_byte_swap = op.type & BYTEREV;
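			/*
			 * analyse_instr() sets BYTEREV for byte-reversed
			 * loads such as lwbrx, so the default-endian flag
			 * passed to the load handlers is inverted for those.
			 */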

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

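			/*
			 * Update-form loads (e.g. lwzu) also write the
			 * effective address back into RA.
			 */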
			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}
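			/*
			 * Some VSX-category instructions are gated by
			 * MSR_VEC rather than MSR_VSX (VSX_CHECK_VEC),
			 * so the matching facility bit was checked above.
			 */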

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision conversion case, e.g. lxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
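				/*
				 * e.g. lxvd2x: 16 bytes with an element
				 * size of 8 gives two 8-byte accesses
				 */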
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX | (op.reg & 0x1f),
					io_size_each, 1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* If byte reversal is needed, op.val has already
			 * been byteswapped by analyse_instr().
			 */
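			/*
			 * op.val already holds the value to store, fetched
			 * from the source register by analyse_instr().
			 */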
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read the current FP
			 * values from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precision conversion case, e.g. stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
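				/*
				 * e.g. stxvd2x: 16 bytes with an element
				 * size of 8 gives two 8-byte accesses
				 */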
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg & 0x1f, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}
	if ((emulated == EMULATE_DONE) || (emulated == EMULATE_DO_MMIO))
		goto out;

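	/*
	 * Fallback: hand-decode the instructions that analyse_instr()
	 * does not handle, currently only lvx and stvx.
	 */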
	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
#ifdef CONFIG_ALTIVEC
		case OP_31_XOP_LVX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;
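			/*
			 * lvx ignores the low four bits of the EA (the
			 * access is 16-byte aligned); the 128-bit load is
			 * performed as two 64-bit MMIO accesses.
			 */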
			vcpu->arch.vaddr_accessed &= ~0xFULL;
			vcpu->arch.paddr_accessed &= ~0xFULL;
			vcpu->arch.mmio_vmx_copy_nums = 2;
			emulated = kvmppc_handle_load128_by2x64(run, vcpu,
					KVM_MMIO_REG_VMX|rt, 1);
			break;

		case OP_31_XOP_STVX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.vaddr_accessed &= ~0xFULL;
			vcpu->arch.paddr_accessed &= ~0xFULL;
			vcpu->arch.mmio_vmx_copy_nums = 2;
			emulated = kvmppc_handle_store128_by2x64(run, vcpu,
					rs, 1);
			break;
#endif /* CONFIG_ALTIVEC */

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	default:
		emulated = EMULATE_FAIL;
		break;
	}

out:
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}