/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
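
/*
 * Illustrative note (an added sketch, not from the original file): each
 * operand slot in the 56-bit opcode flags word is an OpBits-wide field
 * holding one of the Op* codes above, so the decoder can recover, e.g.,
 * the destination operand type with something like:
 *
 *	op_type = (ctxt->d >> DstShift) & OpMask;
 */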

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
/* Source 2 operand type */
#define Src2Shift   (30)
#define Src2None    (OpNone << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
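
/*
 * Illustration (added note, not in the original file): the X* macros just
 * repeat their argument list, so an opcode-table row such as
 *
 *	X16(D(Undefined))
 *
 * expands to sixteen identical entries -- a compact way to fill ranges of
 * a 256-entry opcode table that share one decode description.
 */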

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
		struct gprefix *gprefix;
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};
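
/*
 * Sketch of how these tables are consumed (an added illustration under
 * assumptions, not the decoder itself): a GroupDual entry is resolved by
 * the ModRM mod field, roughly:
 *
 *	if (ctxt->modrm_mod == 3)
 *		opcode = gdual->mod3[ctxt->modrm_reg];
 *	else
 *		opcode = gdual->mod012[ctxt->modrm_reg];
 *
 * while a gprefix entry picks pfx_no/pfx_66/pfx_f2/pfx_f3 based on the
 * last mandatory prefix seen.
 */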

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "                                  \
	"push %"_tmp"; "                                                \
	"push %"_tmp"; "                                                \
	"movl %"_msk",%"_LO32 _tmp"; "                                  \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"pushf; "                                                       \
	"notl %"_LO32 _tmp"; "                                          \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "                                                \
	"orl  %"_LO32 _tmp",("_STK"); "                                 \
	"popf; "                                                        \
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "


#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" ((ctxt)->eflags),			\
			  "+q" (*(_dsttype*)&(ctxt)->dst.val),		\
			  "=&r" (_tmp)					\
			: _y ((ctxt)->src.val), "i" (EFLAGS_MASK));	\
	} while (0)

/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy)		\
	do {								\
		unsigned long _tmp;					\
									\
		switch ((ctxt)->dst.bytes) {				\
		case 2:							\
			____emulate_2op(ctxt,_op,_wx,_wy,"w",u16);	\
			break;						\
		case 4:							\
			____emulate_2op(ctxt,_op,_lx,_ly,"l",u32);	\
			break;						\
		case 8:							\
			ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy)		     \
	do {								     \
		unsigned long _tmp;					     \
		switch ((ctxt)->dst.bytes) {				     \
		case 1:							     \
			____emulate_2op(ctxt,_op,_bx,_by,"b",u8);	     \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(ctxt, _op,			     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(ctxt, _op)					\
	__emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(ctxt, _op)					\
	__emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(ctxt, _op)				\
	__emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
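
/*
 * Usage sketch (added for illustration): an opcode handler for ADD can
 * lean entirely on these macros; the flags land in ctxt->eflags and the
 * result in ctxt->dst.val, e.g.:
 *
 *	emulate_2op_SrcV(ctxt, "add");
 */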

/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(ctxt, _op, _suffix, _type)		\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (ctxt)->src2.val;				\
		_type _srcv = (ctxt)->src.val;				\
		_type _dstv = (ctxt)->dst.val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(ctxt)->src2.val = (unsigned long) _clv;		\
		(ctxt)->src.val = (unsigned long) _srcv;		\
		(ctxt)->dst.val = (unsigned long) _dstv;		\
	} while (0)

#define emulate_2op_cl(ctxt, _op)					\
	do {								\
		switch ((ctxt)->dst.bytes) {				\
		case 2:							\
			__emulate_2op_cl(ctxt, _op, "w", u16);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(ctxt, _op, "l", u32);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(ctxt, _op, "q", ulong));	\
			break;						\
		}							\
	} while (0)

#define __emulate_1op(ctxt, _op, _suffix)				\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(ctxt, _op)						\
	do {								\
		switch ((ctxt)->dst.bytes) {				\
		case 1:	__emulate_1op(ctxt, _op, "b"); break;		\
		case 2:	__emulate_1op(ctxt, _op, "w"); break;		\
		case 4:	__emulate_1op(ctxt, _op, "l"); break;		\
		case 8:	ON64(__emulate_1op(ctxt, _op, "q")); break;	\
		}							\
	} while (0)

#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)			\
	do {								\
		unsigned long _tmp;					\
		ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX];		\
		ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX];		\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "1")			\
			"1: \n\t"					\
			_op _suffix " %6; "				\
			"2: \n\t"					\
			_POST_EFLAGS("0", "5", "1")			\
			".pushsection .fixup,\"ax\" \n\t"		\
			"3: movb $1, %4 \n\t"				\
			"jmp 2b \n\t"					\
			".popsection \n\t"				\
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" ((ctxt)->eflags), "=&r" (_tmp),		\
			  "+a" (*rax), "+d" (*rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val),	\
			  "a" (*rax), "d" (*rdx));			\
	} while (0)

/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(ctxt, _op, _ex)	\
	do {								\
		switch((ctxt)->src.bytes) {				\
		case 1:							\
			__emulate_1op_rax_rdx(ctxt, _op, "b", _ex);	\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx(ctxt, _op, "w", _ex);	\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx(ctxt, _op, "l", _ex);	\
			break;						\
		case 8: ON64(						\
			__emulate_1op_rax_rdx(ctxt, _op, "q", _ex));	\
			break;						\
		}							\
	} while (0)

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
}
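
/*
 * Added example: with a 16-bit address size (ctxt->ad_bytes == 2),
 * ad_mask() is 0xffff, so an increment wraps within the low word and
 * leaves the upper bits untouched -- e.g. SI = 0xffff with inc = 1
 * yields SI = 0x0000 while the rest of RSI is preserved.
 */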

static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	register_address_increment(ctxt, &ctxt->_eip, rel);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->has_seg_override = true;
	ctxt->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->has_seg_override)
		return 0;

	return ctxt->seg_override;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
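
/*
 * Added example: a 16-byte MOVDQA (flagged Aligned) to linear address
 * 0x1008 fails the (la & (size - 1)) check in __linearize() and takes
 * #GP(0), while MOVDQU (flagged Unaligned) at the same address is
 * allowed through.
 */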

static int __linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write, bool fetch,
		     ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl, rpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		break;
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment or read-only data segment */
		if (((desc.type & 8) || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		rpl = sel & 3;
		cpl = max(cpl, rpl);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, addr.seg);
	else
		return emulate_gp(ctxt, addr.seg);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->fetch;
	int rc;
	int size, cur_size;

	if (ctxt->_eip == fc->end) {
		unsigned long linear;
		struct segmented_address addr = { .seg = VCPU_SREG_CS,
						  .ea  = ctxt->_eip };
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size,
			   PAGE_SIZE - offset_in_page(ctxt->_eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
				      size, &ctxt->exception);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		fc->end += size;
	}
	*dest = fc->data[ctxt->_eip - fc->start];
	ctxt->_eip++;
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_insn_fetch_byte(ctxt, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({	rc = do_insn_fetch(_ctxt, _arr, (_size));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
})
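
/*
 * Added usage sketch: the decoder reads immediates and displacements
 * straight from the fetch cache with these macros, e.g.
 *
 *	ctxt->src.val = insn_fetch(s8, ctxt);
 *
 * relying on a local 'rc' variable and a 'done' label in the caller.
 */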

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
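
/*
 * Added example: with highbyte_regs set, encodings 4-7 select AH, CH, DH
 * and BH, i.e. byte 1 of RAX, RCX, RDX and RBX -- so modrm_reg == 4
 * returns (unsigned char *)&regs[VCPU_REGS_RAX] + 1, the AH byte.
 */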

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
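
/*
 * Added example: for JNZ (opcode 0x75, condition code 5), (5 & 15) >> 1
 * is 2, which tests EFLG_ZF, and the odd low bit inverts the sense --
 * so test_cc(5, flags) is true exactly when ZF is clear.
 */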

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;
	int highbyte_regs = ctxt->rex_prefix == 0;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	if (ctxt->d & ByteOp) {
		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, ctxt->regs, 0);
		op->bytes = ctxt->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
	}

	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt->modrm_rm,
					       ctxt->regs, ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.xmm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
		unsigned si = ctxt->regs[VCPU_REGS_RSI];
		unsigned di = ctxt->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += ctxt->regs[base_reg];
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += ctxt->regs[index_reg] << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += ctxt->regs[base_reg];
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}
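
/*
 * Added worked example: in 32-bit mode, "8b 44 9e 08" is
 * mov 0x8(%esi,%ebx,4),%eax -- modrm 0x44 gives mod=1/reg=0/rm=4 (SIB
 * follows), the SIB byte 0x9e gives scale=2/index=3(EBX)/base=6(ESI),
 * and mod=1 pulls an 8-bit displacement, so
 * modrm_ea = ESI + (EBX << 2) + 8.
 */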

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~(ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
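
/*
 * Added example: for "bt %eax,(mem)" with EAX = 100 and a 4-byte operand,
 * mask is ~31, sv = 96, so the effective address moves forward 12 bytes
 * (96 >> 3) and the bit offset is reduced to 100 & 31 == 4.
 */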

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
					      &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
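
/*
 * Added note: for "rep insb" the read-ahead above batches up to a page
 * (or the cache size) of port data in a single ops->pio_in_emulated()
 * call, then feeds later iterations from rc->data instead of performing
 * one port access per byte.
 */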

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		if (ctxt->mode == X86EMUL_MODE_VM86)
			seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;
	cpl = ctxt->ops->cpl(ctxt);

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment, or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	switch (ctxt->dst.type) {
	case OP_REG:
		write_register_operand(&ctxt->dst);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			rc = segmented_cmpxchg(ctxt,
					       ctxt->dst.addr.mem,
					       &ctxt->dst.orig_val,
					       &ctxt->dst.val,
					       ctxt->dst.bytes);
		else
			rc = segmented_write(ctxt,
					     ctxt->dst.addr.mem,
					     &ctxt->dst.val,
					     ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &ctxt->dst.mm_val, ctxt->dst.addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
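
/*
 * Added note: routing locked writes through segmented_cmpxchg() is what
 * keeps, e.g., "lock add $1,(mem)" atomic against other vcpus -- the
 * store only succeeds if memory still holds the dst.orig_val that was
 * read when the operand was fetched.
 */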

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
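
/*
 * Added example: POPF in VM86 mode with IOPL < 3 faults with #GP(0); in
 * protected mode at CPL 3 with IOPL 0 it silently preserves IF and IOPL,
 * updating only the bits left in change_mask.
 */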

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rc = push(ctxt, &ctxt->regs[VCPU_REGS_RBP], stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(&ctxt->regs[VCPU_REGS_RBP], ctxt->regs[VCPU_REGS_RSP],
		      stack_mask(ctxt));
	assign_masked(&ctxt->regs[VCPU_REGS_RSP],
		      ctxt->regs[VCPU_REGS_RSP] - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(&ctxt->regs[VCPU_REGS_RSP], ctxt->regs[VCPU_REGS_RBP],
		      stack_mask(ctxt));
	return emulate_pop(ctxt, &ctxt->regs[VCPU_REGS_RBP], ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
							ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_grp2(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB(ctxt, "rol");
		break;
	case 1:	/* ror */
		emulate_2op_SrcB(ctxt, "ror");
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB(ctxt, "rcl");
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB(ctxt, "rcr");
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB(ctxt, "sal");
		break;
	case 5:	/* shr */
		emulate_2op_SrcB(ctxt, "shr");
		break;
	case 7:	/* sar */
		emulate_2op_SrcB(ctxt, "sar");
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_not(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ~ctxt->dst.val;
	return X86EMUL_CONTINUE;
}

static int em_neg(struct x86_emulate_ctxt *ctxt)
{
	emulate_1op(ctxt, "neg");
	return X86EMUL_CONTINUE;
}

static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 ex = 0;

	emulate_1op_rax_rdx(ctxt, "mul", ex);
	return X86EMUL_CONTINUE;
}

static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 ex = 0;

	emulate_1op_rax_rdx(ctxt, "imul", ex);
	return X86EMUL_CONTINUE;
}

static int em_div_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 de = 0;

	emulate_1op_rax_rdx(ctxt, "div", de);
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 de = 0;

	emulate_1op_rax_rdx(ctxt, "idiv", de);
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 0:	/* inc */
		emulate_1op(ctxt, "inc");
		break;
	case 1:	/* dec */
		emulate_1op(ctxt, "dec");
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		ctxt->_eip = ctxt->src.val;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		ctxt->_eip = ctxt->src.val;
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}
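
/*
 * CMPXCHG8B m64 compares EDX:EAX with the 64-bit destination. If they
 * are equal, ZF is set and ECX:EBX is stored to the destination;
 * otherwise ZF is cleared and the destination is loaded into EDX:EAX.
 * For example (illustrative values only): with EDX:EAX = 0x1:0x2 and
 * the memory operand holding 0x0000000100000002, the compare succeeds
 * and ECX:EBX is written back.
 */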

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
			(u32) ctxt->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	return em_pop(ctxt);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->op_bytes == 4)
		ctxt->_eip = (u32)ctxt->_eip;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
	emulate_2op_SrcV(ctxt, "cmp");

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	u16 selector;

	memset(cs, 0, sizeof(struct desc_struct));
	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64bit longmode. Also a
	 * 64bit guest with a 32bit compat-app running will #UD !! While this
	 * behaviour can be fixed (by emulating) into AMD response - CPUs of
	 * AMD can't behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
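
/*
 * SYSCALL derives its target segments from MSR_STAR (per the AMD APM
 * and Intel SDM): bits 47:32 give the kernel CS selector, and SS is
 * that selector + 8, i.e. roughly:
 *
 *	cs_sel = (star >> 32) & 0xfffc;
 *	ss_sel = (star >> 32) + 8;
 *
 * In long mode RCX receives the return RIP, R11 the saved RFLAGS, and
 * the new RIP comes from MSR_LSTAR (64-bit caller) or MSR_CSTAR
 * (compat caller).
 */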

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
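
/*
 * SYSENTER loads CS from IA32_SYSENTER_CS (forced to RPL 0), SS as
 * CS + 8, and EIP/ESP from IA32_SYSENTER_EIP/IA32_SYSENTER_ESP, so a
 * null SYSENTER_CS selector must fault with #GP, as checked below.
 */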

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/*
	 * XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	ctxt->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}
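
/*
 * SYSEXIT mirrors SYSENTER: the user CS is IA32_SYSENTER_CS + 16 for a
 * 32-bit return or + 32 for a 64-bit return (REX.W), SS is CS + 8, and
 * both selectors get RPL 3. The return EIP and ESP come from EDX and
 * ECX respectively.
 */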

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
	ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}
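
/*
 * When IOPL does not grant access, the 32-bit TSS may still permit
 * individual ports via the I/O permission bitmap: a 16-bit offset at
 * TSS byte 102 points to a bitmap with one bit per port (bit set =
 * access denied). Roughly, port 'p' maps to bit (p & 7) of byte
 * (io_base + p / 8), and an access of 'len' bytes is allowed only if
 * all 'len' consecutive bits are clear, which is what the mask test
 * below implements.
 */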

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}
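
/*
 * Hardware task-switch emulation: the outgoing context is written to
 * the current TSS and the incoming one is read from the new TSS. The
 * save/load helpers below mirror the 16-bit and 32-bit TSS layouts
 * described in the SDM (struct tss_segment_16/tss_segment_32).
 */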

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = ctxt->regs[VCPU_REGS_RAX];
	tss->cx = ctxt->regs[VCPU_REGS_RCX];
	tss->dx = ctxt->regs[VCPU_REGS_RDX];
	tss->bx = ctxt->regs[VCPU_REGS_RBX];
	tss->sp = ctxt->regs[VCPU_REGS_RSP];
	tss->bp = ctxt->regs[VCPU_REGS_RBP];
	tss->si = ctxt->regs[VCPU_REGS_RSI];
	tss->di = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
	ctxt->regs[VCPU_REGS_RSI] = tss->si;
	ctxt->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = ctxt->regs[VCPU_REGS_RAX];
	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
	tss->edx = ctxt->regs[VCPU_REGS_RDX];
	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
	tss->esp = ctxt->regs[VCPU_REGS_RSP];
	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
	tss->esi = ctxt->regs[VCPU_REGS_RSI];
	tss->edi = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	ctxt->regs[VCPU_REGS_RAX] = tss->eax;
	ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
	ctxt->regs[VCPU_REGS_RDX] = tss->edx;
	ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
	ctxt->regs[VCPU_REGS_RSP] = tss->esp;
	ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
	ctxt->regs[VCPU_REGS_RSI] = tss->esi;
	ctxt->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 *
	 * Need to get rflags to the vcpu struct immediately because it
	 * influences the CPL which is checked at least when loading the segment
	 * descriptors and when pushing an error code to the new kernel stack.
	 *
	 * TODO Introduce a separate ctxt->ops->set_cpl callback
	 */
	if (ctxt->eflags & X86_EFLAGS_VM)
		ctxt->mode = X86EMUL_MODE_VM86;
	else
		ctxt->mode = X86EMUL_MODE_PROT32;

	ctxt->ops->set_rflags(ctxt, ctxt->eflags);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS: Check against DPL of the TSS
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	} else if (reason != TASK_SWITCH_IRET) {
		int dpl = next_tss_desc.dpl;
		if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
			return emulate_gp(ctxt, tss_selector);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE)
		ctxt->eip = ctxt->_eip;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
	op->addr.mem.seg = seg;
}
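
/*
 * DAS adjusts AL after a packed-BCD subtraction: subtract 6 if the low
 * nibble is above 9 (or AF is set), then 0x60 if AL was above 0x99 (or
 * CF was set). Worked example: 0x23 - 0x05 leaves AL = 0x1e; the low
 * nibble 0xe > 9, so DAS subtracts 6 and AL becomes 0x18, the correct
 * BCD result of 23 - 05.
 */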

static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	emulate_2op_SrcV(ctxt, "or");
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	jmp_rel(ctxt, rel);
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	old_eip = ctxt->_eip;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = old_eip;
	return em_push(ctxt);
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_add(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "add");
	return X86EMUL_CONTINUE;
}

static int em_or(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "or");
	return X86EMUL_CONTINUE;
}

static int em_adc(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "adc");
	return X86EMUL_CONTINUE;
}

static int em_sbb(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "sbb");
	return X86EMUL_CONTINUE;
}

static int em_and(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "and");
	return X86EMUL_CONTINUE;
}

static int em_sub(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "sub");
	return X86EMUL_CONTINUE;
}

static int em_xor(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "xor");
	return X86EMUL_CONTINUE;
}

static int em_cmp(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "cmp");
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_test(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "test");
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "imul");
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return em_imul(ctxt);
}
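
/*
 * CWD/CDQ/CQO sign-extend the accumulator into DX/EDX/RDX. The
 * expression below broadcasts the sign bit: if the top bit of src is
 * 1, (1 - 1) = 0 and ~0 is all ones; if it is 0, ~(0 - 1) yields 0.
 */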

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
	ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, ctxt->regs[VCPU_REGS_RCX], &pmc))
		return emulate_gp(ctxt, 0);
	ctxt->regs[VCPU_REGS_RAX] = (u32)pmc;
	ctxt->regs[VCPU_REGS_RDX] = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
		| ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
	if (ctxt->ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data))
		return emulate_gp(ctxt, 0);

	ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
	ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_gdt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}
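
/*
 * LOOP/LOOPE/LOOPNE (0xe2/0xe1/0xe0) decrement CX and branch while it
 * is non-zero. For the conditional forms, (opcode ^ 0x5) happens to
 * map 0xe1 to condition code 4 (ZF set) and 0xe0 to 5 (ZF clear), so
 * test_cc() can be reused as shown below.
 */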

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_bt(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;

	emulate_2op_SrcV_nobyte(ctxt, "bt");
	return X86EMUL_CONTINUE;
}

static int em_bts(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "bts");
	return X86EMUL_CONTINUE;
}

static int em_btr(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "btr");
	return X86EMUL_CONTINUE;
}

static int em_btc(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "btc");
	return X86EMUL_CONTINUE;
}

static int em_bsf(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "bsf");
	return X86EMUL_CONTINUE;
}

static int em_bsr(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "bsr");
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ctxt->regs[VCPU_REGS_RAX];
	ecx = ctxt->regs[VCPU_REGS_RCX];
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	ctxt->regs[VCPU_REGS_RAX] = eax;
	ctxt->regs[VCPU_REGS_RBX] = ebx;
	ctxt->regs[VCPU_REGS_RCX] = ecx;
	ctxt->regs[VCPU_REGS_RDX] = edx;
	return X86EMUL_CONTINUE;
}

A
Avi Kivity 已提交
3260 3261 3262 3263 3264 3265 3266
static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs[VCPU_REGS_RAX] &= ~0xff00UL;
	ctxt->regs[VCPU_REGS_RAX] |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
			rsvd = CR3_PAE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
			rsvd = CR3_NONPAE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = ctxt->regs[VCPU_REGS_RAX];

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = ctxt->regs[VCPU_REGS_RCX];

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    (rcx > 3))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
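
/*
 * The decode tables below are built from these macros: D() is a
 * decode-only entry, I() adds an ->execute handler, II()/IIP() add an
 * intercept (and permission check), G()/GD() indirect through a group
 * table selected by ModRM.reg, EXT() by ModRM.rm, and the 2bv/I6ALU
 * wrappers expand one definition into the byte/word-size (and classic
 * ALU encoding) variants.
 */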

#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
		      .check_perm = (_p) }
#define N    D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
	  .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | VendorSpecific,	em_vmmcall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static struct opcode group1[] = {
	I(Lock, em_add),
	I(Lock | PageTable, em_or),
	I(Lock, em_adc),
	I(Lock, em_sbb),
	I(Lock | PageTable, em_and),
	I(Lock, em_sub),
	I(Lock, em_xor),
	I(0, em_cmp),
};

static struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	I(DstMem | SrcImm, em_test),
	I(DstMem | SrcImm, em_test),
	I(DstMem | SrcNone | Lock, em_not),
	I(DstMem | SrcNone | Lock, em_neg),
	I(SrcMem, em_mul_ex),
	I(SrcMem, em_imul_ex),
	I(SrcMem, em_div_ex),
	I(SrcMem, em_idiv_ex),
};

static struct opcode group4[] = {
	I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
	I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	I(DstMem | SrcNone | Lock,		em_grp45),
	I(DstMem | SrcNone | Lock,		em_grp45),
	I(SrcMem | Stack,			em_grp45),
	I(SrcMemFAddr | ImplicitOps | Stack,	em_call_far),
	I(SrcMem | Stack,			em_grp45),
	I(SrcMemFAddr | ImplicitOps,		em_grp45),
	I(SrcMem | Stack,			em_grp45), N,
};

static struct opcode group6[] = {
	DI(Prot,	sldt),
	DI(Prot,	str),
	DI(Prot | Priv,	lldt),
	DI(Prot | Priv,	ltr),
	N, N, N, N,
};

static struct group_dual group7 = { {
	II(Mov | DstMem | Priv,			em_sgdt, sgdt),
	II(Mov | DstMem | Priv,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	I(SrcNone | Priv | VendorSpecific,	em_vmcall),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static struct opcode group8[] = {
	N, N, N, N,
	I(DstMem | SrcImmByte,				em_bt),
	I(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	I(DstMem | SrcImmByte | Lock,			em_btr),
	I(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

static struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static struct gprefix pfx_vmovntpx = {
	I(0, em_mov), N, N, N,
};

static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	I6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	I6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	I6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	I6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	I6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	I6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	I6ALU(0, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	I2bv(DstMem | SrcReg | ModRM, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf), N, I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstDI | String, em_cmp),
	/* 0xA8 - 0xAF */
	I2bv(DstAcc | SrcImm, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	I2bv(SrcAcc | DstDI | String, em_cmp),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | Stack, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	N, I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte, em_loop)),
	I(SrcImmByte, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

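/*
 * Dispatch table for two-byte (0f-prefixed) opcodes, indexed by the
 * second opcode byte.
 */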
static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | VendorSpecific, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
	N, N, N, N,
	N, N, N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | VendorSpecific, em_sysenter),
	I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU

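/*
 * The decoder caps immediate size at four bytes even for a 64-bit
 * operand size; the value is sign-extended to the full width later.
 */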
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

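/*
 * Decode a single operand, where d is one of the OpXXX descriptors
 * extracted from ctxt->d via the Src/Src2/Dst shifts.
 */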
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if ((ctxt->d & BitOp) && op == &ctxt->dst)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
		fetch_register_operand(op);
		break;
	case OpCL:
		op->bytes = 1;
		op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
		op->addr.mem.seg = seg_override(ctxt);
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

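/*
 * Top-level decoder: consumes legacy and REX prefixes and the opcode
 * byte(s), resolves group/prefix table indirections, then fetches the
 * ModRM/SIB bytes and up to three operands.
 */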
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.start = ctxt->_eip;
	ctxt->fetch.end = ctxt->fetch.start + insn_len;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(ctxt, ctxt->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->twobyte = 1;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

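	/*
	 * Resolve indirect table entries: Group selects by the ModRM reg
	 * field, GroupDual additionally by whether ModRM.mod == 3, RMExt
	 * by the ModRM rm field, and Prefix by the mandatory SIMD prefix
	 * (none/66/f2/f3).
	 */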
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	ctxt->execute = opcode.u.execute;
	ctxt->check_perm = opcode.check_perm;
	ctxt->intercept = opcode.intercept;

	/* Unrecognised? */
	if (ctxt->d == 0 || (ctxt->d & Undefined))
		return EMULATION_FAILED;

	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
		return EMULATION_FAILED;

	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
		ctxt->op_bytes = 8;

	if (ctxt->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			ctxt->op_bytes = 8;
		else
			ctxt->op_bytes = 4;
	}

	if (ctxt->d & Sse)
		ctxt->op_bytes = 16;
	else if (ctxt->d & Mmx)
		ctxt->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!ctxt->has_seg_override)
			set_seg_override(ctxt, ctxt->modrm_seg);
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!ctxt->has_seg_override)
		set_seg_override(ctxt, VCPU_SREG_DS);

	ctxt->memop.addr.mem.seg = seg_override(ctxt);

	if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

done:
	if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea += ctxt->_eip;

	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

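/*
 * True for instructions flagged PageTable, i.e. those a guest OS would
 * plausibly use to write page-table entries; callers such as the
 * shadow-MMU write-protection path presumably use this as a hint.
 */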
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/* The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
	 * corresponding termination condition according to:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

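/*
 * fwait raises any pending x87 exception now, while the guest's FPU
 * state is loaded; the fixup entry catches the resulting fault in the
 * host so it can be forwarded to the guest as #MF instead.
 */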
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

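/*
 * Execute a previously decoded instruction: run the privilege,
 * intercept and permission checks, read the memory operands, dispatch
 * to the handler and write the result back.
 */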
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
	    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

	if (ctxt->d & Mmx) {
		rc = flush_pending_x87_faults(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		/*
		 * Now that we know the fpu is exception safe, we can fetch
		 * operands from it.
		 */
		fetch_possible_mmx_operand(ctxt, &ctxt->src);
		fetch_possible_mmx_operand(ctxt, &ctxt->src2);
		if (!(ctxt->d & Mov))
			fetch_possible_mmx_operand(ctxt, &ctxt->dst);
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	/* Instruction can only be executed in protected mode */
	if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Do instruction specific permission checks */
	if (ctxt->check_perm) {
		rc = ctxt->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->eip = ctxt->_eip;
			goto done;
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

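	/*
	 * All instructions funnel through here: after the post-memory-access
	 * intercept check, those with an ->execute callback dispatch through
	 * it, and the rest fall into the opcode switch below.
	 */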
special_insn:

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->execute) {
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->twobyte)
		goto twobyte_insn;

	switch (ctxt->b) {
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op(ctxt, "inc");
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op(ctxt, "dec");
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
			break;
		rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xc0 ... 0xc1:
		rc = em_grp2(ctxt);
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		rc = em_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
		rc = em_grp2(ctxt);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

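	/*
	 * Commit the destination operand, advance the string index
	 * registers and RCX for REP iterations, and finally update the
	 * reported RIP.
	 */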
writeback:
	rc = writeback(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override(ctxt),
				VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
				&ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		struct read_cache *r = &ctxt->io_read;
		register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

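	/*
	 * Two-byte opcodes that have no ->execute callback are handled
	 * in the switch below.
	 */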
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
		if (!test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl(ctxt, "shld");
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl(ctxt, "shrd");
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV(ctxt, "add");
		/* Write back the register source. */
		ctxt->src.val = ctxt->dst.orig_val;
		write_register_operand(&ctxt->src);
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
							(u64) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}