/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)      /* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)      /* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcImmU16   (0xe<<4)    /* Immediate operand, unsigned, 16 bits */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)      /* Memory operand is absolute displacement */
#define String      (1<<12)     /* String instruction (rep capable) */
#define Stack       (1<<13)     /* Stack instruction (push/pop) */
#define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
/* Misc flags */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm     (4<<29)
#define Src2Mask    (7<<29)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
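
/*
 * These repeaters keep the opcode decode tables compact: e.g. X4(op)
 * expands to four copies of "op", covering a run of opcodes that all
 * decode the same way.
 */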
struct opcode {
	u32 flags;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
	} u;
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

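/*
 * A GroupDual opcode decodes via mod012[] when the ModRM mod field selects
 * a memory operand (mod != 3) and via mod3[] for the register form.
 */
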
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "                                  \
	"push %"_tmp"; "                                                \
	"push %"_tmp"; "                                                \
	"movl %"_msk",%"_LO32 _tmp"; "                                  \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"pushf; "                                                       \
	"notl %"_LO32 _tmp"; "                                          \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "                                                \
	"orl  %"_LO32 _tmp",("_STK"); "                                 \
	"popf; "                                                        \
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "

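/*
 * Example flow for an emulated ALU instruction such as "add": _PRE_EFLAGS
 * splices the guest's arithmetic flags (EFLAGS_MASK) into the host EFLAGS
 * around the instruction, and _POST_EFLAGS harvests the resulting flags
 * back into the saved value, so no flags are ever computed by hand.
 */
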
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)

/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ((_dst).bytes) {				             \
		case 1:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)                      \
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)                      \
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)               \
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")

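/*
 * Typical use from the execute path, e.g. for "add r/m,reg" forms:
 *	emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
 * The mnemonic string is pasted directly into the inline-asm template.
 */
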
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) 	\
	do {									\
		unsigned long _tmp;						\
		_type _clv  = (_cl).val;  					\
		_type _srcv = (_src).val;    					\
		_type _dstv = (_dst).val;					\
										\
		__asm__ __volatile__ (						\
			_PRE_EFLAGS("0", "5", "2")				\
			_op _suffix " %4,%1 \n"					\
			_POST_EFLAGS("0", "5", "2")				\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)		\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)		\
			); 							\
										\
		(_cl).val  = (unsigned long) _clv;				\
		(_src).val = (unsigned long) _srcv;				\
		(_dst).val = (unsigned long) _dstv;				\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)				\
	do {									\
		switch ((_dst).bytes) {						\
		case 2:								\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,  	\
						"w", unsigned short);         	\
			break;							\
		case 4: 							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,  	\
						"l", unsigned int);           	\
			break;							\
		case 8:								\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
						"q", unsigned long));  		\
			break;							\
		}								\
	} while (0)

#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)                                    \
	do {								\
		switch ((_dst).bytes) {				        \
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)

#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)		\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "1")			\
			_op _suffix " %5; "				\
			_POST_EFLAGS("0", "4", "1")			\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx)			\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)

#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "1")			\
			"1: \n\t"					\
			_op _suffix " %6; "				\
			"2: \n\t"					\
			_POST_EFLAGS("0", "5", "1")			\
			".pushsection .fixup,\"ax\" \n\t"		\
			"3: movb $1, %4 \n\t"				\
			"jmp 2b \n\t"					\
			".popsection \n\t"				\
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)

/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)			\
	do {									\
		switch((_src).bytes) {						\
		case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
		case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx,  _eflags, "w"); break; \
		case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
		case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
		}							\
	} while (0)

#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex)	\
	do {								\
		switch((_src).bytes) {					\
		case 1:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx,	\
						 _eflags, "b", _ex);	\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "w", _ex);	\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "l", _ex);	\
			break;						\
		case 8: ON64(						\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "q", _ex));	\
			break;						\
		}							\
	} while (0)

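/*
 * Example: group 3 /6 (div r/m) below expands to
 *	emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx, ctxt->eflags, de);
 * where "de" is set by the exception-table fixup if the host div faults,
 * letting the emulator inject #DE into the guest instead of crashing.
 */
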
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)                                  \
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _eip)                                \
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})

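/*
 * Both fetch macros are statement expressions that rely on an "rc"
 * variable and a "done:" label in the enclosing function: any fetch
 * failure jumps straight out of the caller.
 */

/*
 * Mask for the current address size: e.g. ad_bytes == 2 yields 0xffff,
 * so 16-bit address arithmetic wraps as it would on real hardware.
 */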
static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long reg)
{
	return address_mask(c, reg);
}

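/*
 * Advance a register by "inc" while preserving the bits above the current
 * address size, mimicking e.g. 16-bit SI/DI wrap-around in a wider register.
 */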
static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}

static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops,
			     struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return c->seg_override;
}

static ulong linear(struct x86_emulate_ctxt *ctxt,
		    struct segmented_address addr)
{
	struct decode_cache *c = &ctxt->decode;
	ulong la;

	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
	if (c->ad_bytes != 8)
		la &= (u32)-1;
	return la;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
			   ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
			   ctxt->vcpu, &ctxt->exception);
	return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
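
/*
 * test_cc() implements the Jcc/SETcc condition table: e.g. condition 0x4
 * ("e"/"z") tests ZF, and the low bit inverts the sense, so 0x5 is "ne".
 */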

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_seg = VCPU_SREG_DS;

	if (c->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		op->addr.reg = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			c->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops,
		      struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (c->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}

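/*
 * For bit instructions (bt/bts/btr/btc) with a register bit index, the
 * index may reach outside the operand: fold the sign-extended bit offset
 * into the memory address and keep only the in-word remainder.
 */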
static void fetch_bit_operand(struct decode_cache *c)
{
	long sv = 0, mask;

	if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
		mask = ~(c->dst.bytes * 8 - 1);

		if (c->src.bytes == 2)
			sv = (s16)c->src.val & (s16)mask;
		else if (c->src.bytes == 4)
			sv = (s32)c->src.val & (s32)mask;

		c->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	c->src.val &= (c->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n,
					&ctxt->exception, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}

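/*
 * Read-ahead for IN/INS: a single ops->pio_in_emulated() call fetches up
 * to a page (or RCX iterations) worth of data into the io_read cache, so
 * later repetitions of a rep-prefixed insn are served without re-exiting.
 */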
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}

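/* Segment limit scaled by granularity: g=1 means 4K units, hence the 0xfff. */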
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset (dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
						ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}

/* allowed only for 8-byte segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
			    &ctxt->exception);

	return ret;
}

/* allowed only for 8-byte segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
			     &ctxt->exception);

	return ret;
}

/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or the selector's
		 * RPL != CPL, or the descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

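/*
 * Locked instructions are written back via cmpxchg_emulated() so the store
 * only succeeds if memory still holds the value originally read
 * (dst.orig_val), keeping read-modify-write atomic against other vcpus.
 */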
static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;

	switch (c->dst.type) {
	case OP_REG:
		write_register_operand(&c->dst);
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					linear(ctxt, c->dst.addr.mem),
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					&ctxt->exception,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					linear(ctxt, c->dst.addr.mem),
					&c->dst.val,
					c->dst.bytes,
					&ctxt->exception,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

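/*
 * emulate_push() only queues the store: it points c->dst at SS:RSP and
 * lets the common writeback() stage perform the actual memory write,
 * which is why callers below call writeback() after each push.
 */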
static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
	c->dst.addr.mem.seg = VCPU_SREG_SS;
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}

static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt, ops);

		rc = writeback(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	/* Disable writeback. */
	c->dst.type = OP_NONE;

	return rc;
}

static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops, int irq)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	c->src.val = ctxt->eflags;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = c->eip;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	ops->get_idt(&dt, ctxt->vcpu);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = eip;

	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, ops, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = temp_eip;

	if (c->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (c->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt, ops);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}

static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}

static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long *rax = &c->regs[VCPU_REGS_RAX];
	unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
	u8 de = 0;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	case 4: /* mul */
		emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 5: /* imul */
		emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 6: /* div */
		emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	case 7: /* idiv */
		emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	default:
		return X86EMUL_UNHANDLEABLE;
	}
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}

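/*
 * Group 9 is cmpxchg8b: compare EDX:EAX with the 64-bit memory operand,
 * then either load the old value into EDX:EAX (clearing ZF) or store
 * ECX:EBX into memory via the writeback stage (setting ZF).
 */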
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			(u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned short sel;
	int rc;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, ops, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.val = c->src.val;
	return rc;
}

static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}

static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

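	/*
	 * MSR_STAR[47:32] holds the SYSCALL CS selector; SS is
	 * architecturally defined to be that selector + 8.
	 */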
	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
		|| is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}

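/*
 * When CPL > IOPL (or in virtual-8086 mode), I/O instructions must consult
 * the TSS I/O permission bitmap before the port access is allowed.
 */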
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}

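/*
 * Walk the TSS I/O permission bitmap: its offset is stored at byte 102 of
 * the 32-bit TSS (hence the limit check against 103), and each port is one
 * bit, with "len" consecutive bits tested for multi-byte accesses.
 */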
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 1, ctxt->vcpu,
			  NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

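/* Snapshot the registers and segment selectors into a 16-bit TSS image. */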
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * The SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load the segment descriptors. If a fault happens at this
	 * stage, it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

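/*
 * 16-bit task switch: save the outgoing state into the old TSS, read
 * the new TSS (recording the back link for nested tasks), then load
 * the incoming state.
 */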
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
		return emulate_gp(ctxt, 0);
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * The SDM says that segment selectors are loaded before segment
	 * descriptors.
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load the segment descriptors. If a fault happens at this
	 * stage, it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}

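/*
 * Common task-switch core: validate the new TSS descriptor, juggle the
 * busy and NT flags according to the switch reason (CALL, JMP, IRET or
 * gate), delegate to the 16/32-bit helpers above, and finally push the
 * error code onto the new task's stack when one is supplied.
 */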
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
			return emulate_gp(ctxt, 0);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is
	 * set in eflags; note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->lock_prefix = 0;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			ctxt->eip = c->eip;
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}

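/* Step (E)SI/(E)DI for a string op, honouring the direction flag. */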
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(c, c->regs[reg]);
	op->addr.mem.seg = seg;
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	emulate_push(ctxt, ctxt->ops);
	return X86EMUL_CONTINUE;
}

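/*
 * DAS: decimal adjust AL after subtraction.  The low nibble is
 * corrected by 6 and the high nibble by 0x60, updating AF/CF as the
 * hardware would; PF/ZF/SF are then recomputed via a dummy "or".
 */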
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = c->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	c->dst.val = al;
	/* Set PF, ZF, SF */
	c->src.type = OP_IMM;
	c->src.val = 0;
	c->src.bytes = 1;
	emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

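/*
 * Far call: load the new CS from the operand, then push the old CS
 * and return address.  Each push is committed with an explicit
 * writeback since the instruction performs two stack writes.
 */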
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	old_eip = c->eip;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
	if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	c->eip = 0;
	memcpy(&c->eip, c->src.valptr, c->op_bytes);

	c->src.val = old_cs;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = old_eip;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	return X86EMUL_CONTINUE;
}

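/* RET imm16: pop the return address, then discard imm16 extra stack bytes. */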
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->dst.type = OP_REG;
	c->dst.addr.reg = &c->eip;
	c->dst.bytes = c->op_bytes;
	rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
	return X86EMUL_CONTINUE;
}

static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

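/* Three-operand IMUL: seed the destination with the immediate, then reuse em_imul. */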
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.val = c->src2.val;
	return em_imul(ctxt);
}

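/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit into (R|E)DX.
 * Shifting the sign bit down to bit 0, subtracting one and inverting
 * yields all-ones for negative values and zero otherwise.
 */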
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type = OP_REG;
	c->dst.bytes = c->src.bytes;
	c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
	c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

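/* RDTSC faults with #GP when CR4.TSD is set and CPL is not zero. */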
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	unsigned cpl = ctxt->ops->cpl(ctxt->vcpu);
	struct decode_cache *c = &ctxt->decode;
	u64 tsc = 0;

	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD))
		return emulate_gp(ctxt, 0);
	ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
	c->regs[VCPU_REGS_RAX] = (u32)tsc;
	c->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	c->dst.val = c->src.val;
	return X86EMUL_CONTINUE;
}

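/*
 * Helpers for building the opcode tables below: D() declares decode
 * flags only, I() attaches an execute callback, G()/GD() reference
 * group tables, and D2bv()/I2bv()/D6ALU() stamp out the byte/word
 * variants of an entry or a classic ALU row.
 */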
#define D(_y) { .flags = (_y) }
#define N    D(0)
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)

#define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM),			\
		D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock),		\
		D2bv(((_f) & ~Lock) | DstAcc | SrcImm)


static struct opcode group1[] = {
	X7(D(Lock)), N
};

static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	X4(D(SrcMem | ModRM)),
};

static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};

static struct group_dual group7 = { {
	N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv),
	D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
}, {
	D(SrcNone | ModRM | Priv | VendorSpecific), N,
	N, D(SrcNone | ModRM | Priv | VendorSpecific),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv), N,
} };

static struct opcode group8[] = {
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
};

static struct group_dual group9 = { {
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static struct opcode group11[] = {
	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
};

static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	D6ALU(Lock), N, N,
	/* 0x28 - 0x2F */
	D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	D6ALU(Lock), N, N,
	/* 0x38 - 0x3F */
	D6ALU(0), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(D(DstReg | Stack)),
	/* 0x60 - 0x67 */
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	D2bv(DstDI | Mov | String), /* insb, insw/insd */
	D2bv(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
	D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
	/* 0x90 - 0x97 */
	X8(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	D2bv(SrcSI | DstDI | String),
	/* 0xA8 - 0xAF */
	D2bv(DstAcc | SrcImm),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	D2bv(SrcAcc | DstDI | String),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	D(ImplicitOps | Stack),
	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	N, N, N, D(ImplicitOps | Stack),
	D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X4(D(SrcImmByte)),
	D2bv(SrcImmUByte | DstAcc), D2bv(SrcAcc | DstImmUByte),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
	D2bv(SrcNone | DstAcc),	D2bv(SrcAcc | ImplicitOps),
	/* 0xF0 - 0xF7 */
	N, N, N, N,
	D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	N, GD(0, &group7), N, N,
	N, D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv), N,
	D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
	D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
	D(ImplicitOps | Priv), N,
	D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x70 - 0x7F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

#undef D
#undef N
#undef G
#undef GD
#undef I

#undef D2bv
#undef I2bv
#undef D6ALU

static unsigned imm_size(struct decode_cache *c)
{
	unsigned size;

	size = (c->d & ByteOp) ? 1 : c->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

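/*
 * Fetch an immediate operand of the given size from the instruction
 * stream, sign- or zero-extending it into op->val.
 */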
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	struct decode_cache *c = &ctxt->decode;
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = c->eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, 1, c->eip);
		break;
	case 2:
		op->val = insn_fetch(s16, 2, c->eip);
		break;
	case 4:
		op->val = insn_fetch(s32, 4, c->eip);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

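/*
 * Main decode pass: consume legacy and REX prefixes, look the opcode
 * up in the tables above, then decode the ModRM/SIB bytes and the
 * source, second-source and destination operands into the decode cache.
 */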
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, dual, goffset;
	struct opcode opcode, *g_mod012, *g_mod3;
	struct operand memop = { .type = OP_NONE };

	c->eip = ctxt->eip;
	c->fetch.start = c->eip;
	c->fetch.end = c->fetch.start + insn_len;
	if (insn_len > 0)
		memcpy(c->fetch.data, insn, insn_len);
	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[c->b];
	/* Two-byte opcode? */
	if (c->b == 0x0f) {
		c->twobyte = 1;
		c->b = insn_fetch(u8, 1, c->eip);
		opcode = twobyte_table[c->b];
	}
	c->d = opcode.flags;

	if (c->d & Group) {
		dual = c->d & GroupDual;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		if (c->d & GroupDual) {
			g_mod012 = opcode.u.gdual->mod012;
			g_mod3 = opcode.u.gdual->mod3;
		} else
			g_mod012 = g_mod3 = opcode.u.group;

		c->d &= ~(Group | GroupDual);

		goffset = (c->modrm >> 3) & 7;

		if ((c->modrm >> 6) == 3)
			opcode = g_mod3[goffset];
		else
			opcode = g_mod012[goffset];
		c->d |= opcode.flags;
	}

	c->execute = opcode.u.execute;

	/* Unrecognised? */
	if (c->d == 0 || (c->d & Undefined))
		return -1;

	if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
		return -1;

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	if (c->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			c->op_bytes = 8;
		else
			c->op_bytes = 4;
	}

	/* ModRM and SIB bytes. */
	if (c->d & ModRM) {
		rc = decode_modrm(ctxt, ops, &memop);
		if (!c->has_seg_override)
			set_seg_override(c, c->modrm_seg);
	} else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops, &memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	memop.addr.mem.seg = seg_override(ctxt, ops, c);

	if (memop.type == OP_MEM && c->ad_bytes != 8)
		memop.addr.mem.ea = (u32)memop.addr.mem.ea;

	if (memop.type == OP_MEM && c->rip_relative)
		memop.addr.mem.ea += c->eip;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		memop.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		memop.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		memop.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
	srcmem_common:
		c->src = memop;
		break;
	case SrcImmU16:
		rc = decode_imm(ctxt, &c->src, 2, false);
		break;
	case SrcImm:
		rc = decode_imm(ctxt, &c->src, imm_size(c), true);
		break;
	case SrcImmU:
		rc = decode_imm(ctxt, &c->src, imm_size(c), false);
		break;
	case SrcImmByte:
		rc = decode_imm(ctxt, &c->src, 1, true);
		break;
	case SrcImmUByte:
		rc = decode_imm(ctxt, &c->src, 1, false);
		break;
	case SrcAcc:
		c->src.type = OP_REG;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->src);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.mem.ea =
			register_address(c, c->regs[VCPU_REGS_RSI]);
		c->src.addr.mem.seg = seg_override(ctxt, ops, c);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.addr.mem.ea = c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		memop.bytes = c->op_bytes + 2;
		goto srcmem_common;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* shift count is CL, i.e. bits 7:0 */
		break;
	case Src2ImmByte:
		rc = decode_imm(ctxt, &c->src2, 1, true);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Imm:
		rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstImmUByte:
		c->dst.type = OP_IMM;
		c->dst.addr.mem.ea = c->eip;
		c->dst.bytes = 1;
		c->dst.val = insn_fetch(u8, 1, c->eip);
		break;
	case DstMem:
	case DstMem64:
		c->dst = memop;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->d & BitOp)
			fetch_bit_operand(c);
		c->dst.orig_val = c->dst.val;
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->dst);
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.mem.ea =
			register_address(c, c->regs[VCPU_REGS_RDI]);
		c->dst.addr.mem.seg = VCPU_SREG_ES;
		c->dst.val = 0;
		break;
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
	default:
		c->dst.type = OP_NONE; /* Disable writeback. */
		return 0;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	/* The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
	 * corresponding termination condition according to:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((c->b == 0xa6) || (c->b == 0xa7) ||
	     (c->b == 0xae) || (c->b == 0xaf))
	    && (((c->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((c->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

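/*
 * Execute a previously decoded instruction: fetch memory operands,
 * dispatch through the opcode's ->execute callback or the big switch
 * below, write the result back and advance RIP, with REP string
 * handling and restart support.
 */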
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;
	int irq; /* Used for int 3, int, and into */

	ctxt->decode.mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Privileged instructions can be executed only at CPL 0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->eip = c->eip;
			goto done;
		}
	}

	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
		rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
					c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val64 = c->src.val64;
	}

	if (c->src2.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
					&c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
				   &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;
special_insn:

	if (c->execute) {
		rc = c->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
	case 0x00 ... 0x05:
	      add:		/* add */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		break;
	case 0x06:		/* push es */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x07:		/* pop es */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x08 ... 0x0d:
	      or:		/* or */
		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
		break;
	case 0x0e:		/* push cs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
		break;
	case 0x10 ... 0x15:
	      adc:		/* adc */
		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
		break;
	case 0x16:		/* push ss */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x17:		/* pop ss */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x18 ... 0x1d:
	      sbb:		/* sbb */
		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
		break;
	case 0x1e:		/* push ds */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x1f:		/* pop ds */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x20 ... 0x25:
	      and:		/* and */
		emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
		break;
	case 0x28 ... 0x2d:
	      sub:		/* sub */
		emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
		break;
	case 0x30 ... 0x35:
	      xor:		/* xor */
		emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
		break;
	case 0x38 ... 0x3d:
	      cmp:		/* cmp */
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		break;
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 0x58 ... 0x5f: /* pop reg */
	pop_instruction:
		rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
		break;
	case 0x60:	/* pusha */
		rc = emulate_pusha(ctxt, ops);
		break;
	case 0x61:	/* popa */
		rc = emulate_popa(ctxt, ops);
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		c->dst.val = (s32) c->src.val;
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		c->src.val = c->regs[VCPU_REGS_RDX];
		goto do_io_in;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		c->dst.val = c->regs[VCPU_REGS_RDX];
		goto do_io_out;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
	test:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		c->src.val = c->dst.val;
		write_register_operand(&c->src);
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.orig_val;
		c->lock_prefix = 1;
		break;
	case 0x8c:  /* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			rc = emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->src.addr.mem.ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
			break;
		goto xchg;
	case 0x98: /* cbw/cwde/cdqe */
		switch (c->op_bytes) {
		case 2: c->dst.val = (s8)c->dst.val; break;
		case 4: c->dst.val = (s16)c->dst.val; break;
		case 8: c->dst.val = (s32)c->dst.val; break;
		}
		break;
	case 0x9c: /* pushf */
		c->src.val = (unsigned long) ctxt->eflags;
		emulate_push(ctxt, ops);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		break;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		goto cmp;
	case 0xa8 ... 0xa9:	/* test ax, imm */
		goto test;
	case 0xae ... 0xaf:	/* scas */
		goto cmp;
	case 0xc0 ... 0xc1:
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc4:		/* les */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0xc5:		/* lds */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		break;
	case 0xcc:		/* int3 */
		irq = 3;
		goto do_interrupt;
	case 0xcd:		/* int n */
		irq = c->src.val;
	do_interrupt:
		rc = emulate_int(ctxt, ops, irq);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF) {
			irq = 4;
			goto do_interrupt;
		}
		break;
	case 0xcf:		/* iret */
		rc = emulate_iret(ctxt, ops);
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe0 ... 0xe2:	/* loop/loopz/loopnz */
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
		    (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
			jmp_rel(c, c->src.val);
		break;
	case 0xe3:	/* jcxz/jecxz/jrcxz */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
			jmp_rel(c, c->src.val);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->dst.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->src.bytes = min(c->src.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->dst.val,
					  c->src.bytes)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		}
		ops->pio_out_emulated(c->src.bytes, c->dst.val,
				      &c->src.val, 1, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		rc = emulate_grp3(ctxt, ops);
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else
			ctxt->eflags &= ~X86_EFLAGS_IF;
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *r = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when the pio read ahead buffer is
			 * empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->decode.mem_read.end = 0;
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = c->eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.addr.mem,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.addr.mem,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			emulate_ud(ctxt);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu,
				       linear(ctxt, c->src.addr.mem));
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05: 		/* syscall */
		rc = emulate_syscall(ctxt, ops);
		break;
	case 0x06:
		emulate_clts(ctxt->vcpu);
		break;
	case 0x09:		/* wbinvd */
		kvm_emulate_wbinvd(ctxt->vcpu);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			emulate_ud(ctxt);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}

		if (ops->set_dr(c->modrm_reg, c->src.val &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
3565 3566 3567 3568
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
3569
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
3570
			emulate_gp(ctxt, 0);
3571
			rc = X86EMUL_PROPAGATE_FAULT;
3572
			goto done;
3573 3574 3575 3576 3577
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		break;
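	/*
	 * CMOVcc: perform the load unconditionally, then suppress
	 * writeback when the condition is false.
	 */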
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f:	/* jcc rel */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		c->dst.val = test_cc(c->b, ctxt->eflags);
		break;
	case 0xa0:	  /* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
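	/*
	 * BT/BTS/BTR/BTC take the bit offset modulo the operand width
	 * for register destinations, hence the subword mask below.
	 */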
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
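	/*
	 * SHLD/SHRD shift the destination while filling the vacated
	 * bits from the second source operand.
	 */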
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xab:
	      bts:		/* bts */
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
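	/*
	 * LSS/LFS/LGS load a far pointer: the offset goes to the
	 * destination register and the selector to the segment register.
	 */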
	case 0xb2:		/* lss */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0xb3:
	      btr:		/* btr */
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb4:		/* lfs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xb5:		/* lgs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
						       : (u16) c->src.val;
		break;
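	/*
	 * Grp8: BT/BTS/BTR/BTC with an immediate bit offset; the low two
	 * ModRM reg bits select the operation, so reuse the register forms.
	 */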
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
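	/*
	 * BSF/BSR leave the destination undefined and set ZF when the
	 * source is zero, so writeback is suppressed in that case.
	 */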
	case 0xbc: {		/* bsf */
		u8 zf;
		__asm__ ("bsf %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbd: {		/* bsr */
		u8 zf;
		__asm__ ("bsr %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
							(s16) c->src.val;
		break;
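	/*
	 * XADD: the sum goes to the destination and the old destination
	 * value goes back to the source register.
	 */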
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		/* Write back the register source. */
		c->src.val = c->dst.orig_val;
		write_register_operand(&c->src);
		break;
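	/* MOVNTI is emulated as a plain store; the non-temporal hint is ignored. */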
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
							(u64) c->src.val;
		break;
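	/* Grp9: CMPXCHG8B compares EDX:EAX with the 64-bit memory operand. */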
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return -1;
}