/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#ifndef __KERNEL__
#include <stdio.h>
#include <stdint.h>
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)      /* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)      /* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)      /* Memory operand is absolute displacement */
#define String      (1<<12)     /* String instruction (rep capable) */
#define Stack       (1<<13)     /* Stack instruction (push/pop) */
#define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
/* Misc flags */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Mask    (7<<29)
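/*
 * Entries in the opcode tables below OR these fields together. An
 * illustrative (not table-quoted) combination:
 *
 *	DstMem | SrcReg | ModRM | Lock
 *
 * describes an instruction whose destination is the memory operand
 * decoded from ModRM, whose source is the register in ModRM.reg, and
 * which may carry a lock prefix.
 */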

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
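/*
 * Example expansion: X4(op) becomes "op, op, op, op", so a run of
 * sixteen identical table entries can be written as X16(entry) instead
 * of being spelled out.
 */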

struct opcode {
	u32 flags;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
	} u;
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};
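/*
 * Sketch of how these tables are consumed by the decoder: an opcode
 * flagged Group uses ModRM bits 3..5 to index u.group[]; one flagged
 * GroupDual additionally selects u.gdual->mod3[] when ModRM.mod == 3
 * and u.gdual->mod012[] otherwise.
 */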

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "                                  \
	"push %"_tmp"; "                                                \
	"push %"_tmp"; "                                                \
	"movl %"_msk",%"_LO32 _tmp"; "                                  \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"pushf; "                                                       \
	"notl %"_LO32 _tmp"; "                                          \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "                                                \
	"orl  %"_LO32 _tmp",("_STK"); "                                 \
	"popf; "                                                        \
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)


/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ((_dst).bytes) {				             \
		case 1:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)                      \
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)                      \
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)               \
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")

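/*
 * Usage sketch: a two-operand ALU instruction handler is typically a
 * one-liner such as
 *
 *	emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
 *
 * which executes a real "add" of the correct width on the host and
 * folds the arithmetic bits of the host EFLAGS back into the guest's
 * saved flags via EFLAGS_MASK.
 */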
/* Instruction has three operands and one operand is stored in the ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) 	\
	do {									\
		unsigned long _tmp;						\
		_type _clv  = (_cl).val;  					\
		_type _srcv = (_src).val;    					\
		_type _dstv = (_dst).val;					\
										\
		__asm__ __volatile__ (						\
			_PRE_EFLAGS("0", "5", "2")				\
			_op _suffix " %4,%1 \n"					\
			_POST_EFLAGS("0", "5", "2")				\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)		\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)		\
			); 							\
										\
		(_cl).val  = (unsigned long) _clv;				\
		(_src).val = (unsigned long) _srcv;				\
		(_dst).val = (unsigned long) _dstv;				\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)				\
	do {									\
		switch ((_dst).bytes) {						\
		case 2:								\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,  	\
						"w", unsigned short);         	\
			break;							\
		case 4: 							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,  	\
						"l", unsigned int);           	\
			break;							\
		case 8:								\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
						"q", unsigned long));  		\
			break;							\
		}								\
	} while (0)

#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)                                    \
	do {								\
		switch ((_dst).bytes) {				        \
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)

#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)		\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "1")			\
			_op _suffix " %5; "				\
			_POST_EFLAGS("0", "4", "1")			\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx)			\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)

/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)			\
	do {									\
		switch((_src).bytes) {						\
		case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
		case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx,  _eflags, "w"); break; \
		case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
		case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
		}							\
	} while (0)

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)                                  \
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _eip)                                \
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})

static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}

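/*
 * Worked example: with a 16-bit address size (ad_bytes == 2) ad_mask()
 * yields (1UL << 16) - 1 == 0xffff, and with ad_bytes == 4 it yields
 * 0xffffffff -- the set of address bits that take part in
 * effective-address arithmetic below.
 */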
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}

static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}

static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}

static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct x86_emulate_ops *ops,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, ops, c->seg_override);
}

static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	return seg_base(ctxt, ops, VCPU_SREG_SS);
}

static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
				      u32 error, bool valid)
{
	ctxt->exception = vec;
	ctxt->error_code = error;
	ctxt->error_code_valid = valid;
	ctxt->restart = false;
}

static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, GP_VECTOR, err, true);
}

static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
		       int err)
{
	ctxt->cr2 = addr;
	emulate_exception(ctxt, PF_VECTOR, err, true);
}

static void emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
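
/*
 * Example: without a REX prefix (highbyte_regs != 0), modrm_reg == 4
 * resolves to AH, i.e. byte 1 of the RAX slot; with a REX prefix
 * (highbyte_regs == 0) the same encoding resolves to the low byte of
 * RSP instead.
 */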

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   ulong addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
	return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
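
/*
 * Example: JNZ/JNE uses condition code 0x5; (5 & 15) >> 1 == 2 selects
 * the ZF test and the low bit inverts it, so test_cc(0x5, flags) is
 * true exactly when ZF is clear.
 */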

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_seg = VCPU_SREG_DS;

	if (c->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		op->addr.reg = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			c->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
	op->addr.mem = modrm_ea;
done:
	return rc;
}
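/*
 * Worked 16-bit example: mod == 1, rm == 2 with an 8-bit displacement
 * of 0x10 decodes to [bp + si + 0x10], and because rm is 2 the default
 * segment is SS rather than DS, as set just above.
 */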

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops,
		      struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (c->ad_bytes) {
	case 2:
		op->addr.mem = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		op->addr.mem = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		op->addr.mem = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct decode_cache *c)
{
	long sv, mask;

	if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
		mask = ~(c->dst.bytes * 8 - 1);

		if (c->src.bytes == 2)
			sv = (s16)c->src.val & (s16)mask;
		else if (c->src.bytes == 4)
			sv = (s32)c->src.val & (s32)mask;

		c->dst.addr.mem += (sv >> 3);
	}

	/* only subword offset */
	c->src.val &= (c->dst.bytes << 3) - 1;
}
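/*
 * Example: "bt [mem], reg" with 16-bit operands and a bit index of 35
 * selects the word 35 & ~15 == 32 bits past the base (addr.mem is
 * advanced by 32 >> 3 == 4 bytes) and tests bit 35 & 15 == 3 within it.
 */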

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;
	u32 err;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, addr, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
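/*
 * The refill above batches a rep-prefixed "ins": as many elements as
 * fit in both the current page and rc->data are fetched with a single
 * pio_in_emulated() call, and later iterations are served from the
 * cache.
 */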

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset (dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}

/* allowed just for 8-byte segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	u32 err;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,  &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}

/* allowed just for 8-byte segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	u32 err;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7) {
		emulate_gp(ctxt, selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
	if (ret == X86EMUL_PROPAGATE_FAULT)
		emulate_pf(ctxt, addr, err);

	return ret;
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}
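/*
 * Informal gloss of the type-bit tests above (SDM descriptor-type
 * encoding): bit 3 distinguishes code (1) from data (0), bit 1 means
 * writable for data / readable for code, and bit 2 means expand-down
 * for data / conforming for code. So (type & 0xa) == 0x2 is "writable
 * data" and (type & 0xa) == 0x8 is "execute-only code".
 */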

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;
	u32 err;

	switch (c->dst.type) {
	case OP_REG:
		write_register_operand(&c->dst);
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					c->dst.addr.mem,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					c->dst.addr.mem,
					&c->dst.val,
					c->dst.bytes,
					&err,
					ctxt->vcpu);
		if (rc == X86EMUL_PROPAGATE_FAULT)
			emulate_pf(ctxt, c->dst.addr.mem, err);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
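/*
 * Note the lock-prefix handling above: a locked write is performed as a
 * compare-and-exchange against dst.orig_val, so a concurrent change to
 * the operand by another vcpu is detected instead of being overwritten.
 */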

static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
					   c->regs[VCPU_REGS_RSP]);
}
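/*
 * emulate_push() only prepares c->dst; the store to the stack happens
 * in the common writeback() pass. Callers that push several values in
 * one instruction (e.g. pusha, int) therefore call writeback() after
 * each push.
 */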

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
						       c->regs[VCPU_REGS_RSP]),
			   dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
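/*
 * Example of the privilege rules above: in protected mode at CPL 3 with
 * IOPL 0, neither EFLG_IF nor EFLG_IOPL is added to change_mask, so the
 * popped values of those bits are silently ignored.
 */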

static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}

static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt, ops);

		rc = writeback(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	/* Disable writeback. */
	c->dst.type = OP_NONE;

	return rc;
}

static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}
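/*
 * popa restores RDI..RAX but skips RSP: the saved value is discarded by
 * simply advancing the stack pointer, matching architectural popa/popad
 * behavior.
 */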

int emulate_int_real(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops, int irq)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;
	u32 err;

	/* TODO: Add limit checks */
	c->src.val = ctxt->eflags;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = c->eip;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	ops->get_idt(&dt, ctxt->vcpu);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = eip;

	return rc;
}
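/*
 * Real-mode vectoring example: for irq 8, (irq << 2) == 0x20, so the
 * new IP is read from IVT offset 0x20 and the new CS selector from
 * offset 0x22, as computed above.
 */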

static int emulate_int(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, ops, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = temp_eip;

	if (c->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (c->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops* ops)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt, ops);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}

static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}
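/*
 * Group 2 covers the rotate/shift family (opcodes 0xc0/0xc1 and
 * 0xd0-0xd3): the operation is chosen by ModRM.reg, which is why one
 * opcode-table entry can stand in for all eight variants.
 */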

static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long *rax = &c->regs[VCPU_REGS_RAX];
	unsigned long *rdx = &c->regs[VCPU_REGS_RDX];

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	case 4: /* mul */
		emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 5: /* imul */
		emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 6: /* div */
		emulate_1op_rax_rdx("div", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 7: /* idiv */
		emulate_1op_rax_rdx("idiv", c->src, *rax, *rdx, ctxt->eflags);
		break;
	default:
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}

static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			(u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
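/*
 * This is cmpxchg8b: if EDX:EAX equals the old memory value, ZF is set
 * and ECX:EBX is stored through the normal writeback path; otherwise ZF
 * is cleared and the current memory value is loaded into EDX:EAX.
 */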

static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}

static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}
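/*
 * MSR_STAR layout relied on above: bits 47:32 hold the kernel CS
 * selector used by syscall, with SS implied at that selector + 8; hence
 * the 32-bit shift, the 0xfffc mask for cs_sel and ss_sel = msr_data + 8.
 */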

static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	* Therefore, we inject an #UD.
	*/
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		emulate_ud(ctxt);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
		|| is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}
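/*
 * The selector arithmetic above follows the sysexit convention:
 * relative to SYSENTER_CS, 32-bit user CS sits at +16 with SS at +24,
 * while 64-bit user CS sits at +32 with SS at +40 (cs_sel + 8), and
 * both are returned to with RPL 3.
 */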

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
			  &perm, 1, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
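/*
 * Worked example: for "in al, 0x61" (len == 1), the bitmap byte at
 * io_bitmap_ptr + 0x61/8 == io_bitmap_ptr + 12 is read and bit
 * 0x61 & 7 == 1 is tested; the access is allowed only if that bit is
 * clear in the TSS I/O permission bitmap.
 */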

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

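/*
 * 16-bit task switch: save the outgoing context into the old TSS, read
 * the new TSS, optionally chain prev_task_link for nested tasks, then
 * load the incoming context.
 */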
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 err, new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, old_tss_base, err);
		return ret;
	}

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &err);
	if (ret == X86EMUL_PROPAGATE_FAULT) {
		/* FIXME: need to provide precise fault address */
		emulate_pf(ctxt, new_tss_base, err);
		return ret;
	}

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &err);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			/* FIXME: need to provide precise fault address */
			emulate_pf(ctxt, new_tss_base, err);
			return ret;
		}
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}

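/*
 * Common task-switch driver: validates privilege (except for IRET),
 * checks the new TSS descriptor's presence and limit (at least 0x67
 * for a 32-bit TSS, 0x2b for a 16-bit one), manages the busy bit and
 * the NT flag, then delegates to the 16- or 32-bit variant.
 */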
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
			emulate_gp(ctxt, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0,  ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->lock_prefix = 0;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}

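/*
 * External entry point for task switches: runs the switch and, on
 * success, writes back the decode cache so the new register state and
 * EIP reach the vcpu.
 */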
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			ctxt->eip = c->eip;
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->addr.mem = register_address(c,  base, c->regs[reg]);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	emulate_push(ctxt, ctxt->ops);
	return X86EMUL_CONTINUE;
}

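/*
 * DAS decimal-adjusts AL after a subtraction: subtract 6 if the low
 * nibble overflowed (or AF was set), subtract 0x60 if the high nibble
 * did (or CF was set), then recompute PF/ZF/SF via a dummy "or".
 */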
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = c->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	c->dst.val = al;
	/* Set PF, ZF, SF */
	c->src.type = OP_IMM;
	c->src.val = 0;
	c->src.bytes = 1;
	emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

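/*
 * Shorthand for building the opcode tables below: D() declares decode
 * flags only, N is an undefined opcode, G()/GD() redirect through a
 * ModRM reg-field group (dual tables for mod != 3 vs mod == 3), and
 * I() attaches an ->execute callback.
 */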
#define D(_y) { .flags = (_y) }
#define N    D(0)
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }

static struct opcode group1[] = {
	X7(D(Lock)), N
};

static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	X4(D(SrcMem | ModRM)),
};

static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack), N,
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};

static struct group_dual group7 = { {
	N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv),
	D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
}, {
	D(SrcNone | ModRM | Priv), N, N, D(SrcNone | ModRM | Priv),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv), N,
} };

static struct opcode group8[] = {
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
};

static struct group_dual group9 = { {
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
	/* 0x28 - 0x2F */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm),
	N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImmByte), D(DstAcc | SrcImm), N, N,
	/* 0x38 - 0x3F */
	D(ByteOp | DstMem | SrcReg | ModRM), D(DstMem | SrcReg | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstAcc | SrcImm), D(DstAcc | SrcImm),
	N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(D(DstReg | Stack)),
	/* 0x60 - 0x67 */
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push), N,
	I(SrcImmByte | Mov | Stack, em_push), N,
	D(DstDI | ByteOp | Mov | String), D(DstDI | Mov | String), /* insb, insw/insd */
	D(SrcSI | ByteOp | ImplicitOps | String), D(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	D(ByteOp | DstMem | SrcReg | ModRM), D(DstMem | SrcReg | ModRM),
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	/* 0x88 - 0x8F */
	D(ByteOp | DstMem | SrcReg | ModRM | Mov), D(DstMem | SrcReg | ModRM | Mov),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem | ModRM | Mov),
	D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
	D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
	/* 0x90 - 0x97 */
	X8(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), N, D(SrcImmFAddr | No64), N,
	D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
	/* 0xA0 - 0xA7 */
	D(ByteOp | DstAcc | SrcMem | Mov | MemAbs), D(DstAcc | SrcMem | Mov | MemAbs),
	D(ByteOp | DstMem | SrcAcc | Mov | MemAbs), D(DstMem | SrcAcc | Mov | MemAbs),
	D(ByteOp | SrcSI | DstDI | Mov | String), D(SrcSI | DstDI | Mov | String),
	D(ByteOp | SrcSI | DstDI | String), D(SrcSI | DstDI | String),
	/* 0xA8 - 0xAF */
	D(DstAcc | SrcImmByte | ByteOp), D(DstAcc | SrcImm),
	D(ByteOp | SrcAcc | DstDI | Mov | String), D(SrcAcc | DstDI | Mov | String),
	D(ByteOp | SrcSI | DstAcc | Mov | String), D(SrcSI | DstAcc | Mov | String),
	D(ByteOp | SrcAcc | DstDI | String), D(SrcAcc | DstDI | String),
	/* 0xB0 - 0xB7 */
	X8(D(ByteOp | DstReg | SrcImm | Mov)),
	/* 0xB8 - 0xBF */
	X8(D(DstReg | SrcImm | Mov)),
	/* 0xC0 - 0xC7 */
	D(ByteOp | DstMem | SrcImm | ModRM), D(DstMem | SrcImmByte | ModRM),
	N, D(ImplicitOps | Stack), N, N,
	D(ByteOp | DstMem | SrcImm | ModRM | Mov), D(DstMem | SrcImm | ModRM | Mov),
	/* 0xC8 - 0xCF */
	N, N, N, D(ImplicitOps | Stack),
	D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
	/* 0xD0 - 0xD7 */
	D(ByteOp | DstMem | SrcOne | ModRM), D(DstMem | SrcOne | ModRM),
	D(ByteOp | DstMem | SrcImplicit | ModRM), D(DstMem | SrcImplicit | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X3(D(SrcImmByte)), N,
	D(ByteOp | SrcImmUByte | DstAcc), D(SrcImmUByte | DstAcc),
	D(ByteOp | SrcAcc | DstImmUByte), D(SrcAcc | DstImmUByte),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
	D(SrcNone | ByteOp | DstAcc), D(SrcNone | DstAcc),
	D(ByteOp | SrcAcc | ImplicitOps), D(SrcAcc | ImplicitOps),
	/* 0xF0 - 0xF7 */
	N, N, N, N,
	D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	N, GD(0, &group7), N, N,
	N, D(ImplicitOps), D(ImplicitOps | Priv), N,
	D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
	D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	D(ImplicitOps | Priv), N, D(ImplicitOps | Priv), N,
	D(ImplicitOps), D(ImplicitOps | Priv), N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x70 - 0x7F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), N,
	/* 0xB0 - 0xB7 */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
	N, N, D(ByteOp | DstReg | SrcMem | ModRM | Mov),
	    D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D(ByteOp | DstMem | SrcReg | ModRM | Lock), D(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

#undef D
#undef N
#undef G
#undef GD
#undef I

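/*
 * Decode one instruction: consume legacy and REX prefixes, look up the
 * opcode (and any ModRM group) in the tables above, then resolve the
 * source, second-source and destination operands.
 */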
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, dual, goffset;
	struct opcode opcode, *g_mod012, *g_mod3;
	struct operand memop = { .type = OP_NONE };

	/* we cannot decode insn before we complete previous rep insn */
	WARN_ON(ctxt->restart);

	c->eip = ctxt->eip;
	c->fetch.start = c->fetch.end = c->eip;
	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[c->b];
	/* Two-byte opcode? */
	if (c->b == 0x0f) {
		c->twobyte = 1;
		c->b = insn_fetch(u8, 1, c->eip);
		opcode = twobyte_table[c->b];
	}
	c->d = opcode.flags;

	if (c->d & Group) {
		dual = c->d & GroupDual;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		if (c->d & GroupDual) {
			g_mod012 = opcode.u.gdual->mod012;
			g_mod3 = opcode.u.gdual->mod3;
		} else
			g_mod012 = g_mod3 = opcode.u.group;

		c->d &= ~(Group | GroupDual);

		goffset = (c->modrm >> 3) & 7;

		if ((c->modrm >> 6) == 3)
			opcode = g_mod3[goffset];
		else
			opcode = g_mod012[goffset];
		c->d |= opcode.flags;
	}

	c->execute = opcode.u.execute;

	/* Unrecognised? */
	if (c->d == 0 || (c->d & Undefined)) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	if (c->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			c->op_bytes = 8;
		else
			c->op_bytes = 4;
	}

	/* ModRM and SIB bytes. */
	if (c->d & ModRM) {
		rc = decode_modrm(ctxt, ops, &memop);
		if (!c->has_seg_override)
			set_seg_override(c, c->modrm_seg);
	} else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops, &memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
		memop.addr.mem += seg_override_base(ctxt, ops, c);

	if (memop.type == OP_MEM && c->ad_bytes != 8)
		memop.addr.mem = (u32)memop.addr.mem;

	if (memop.type == OP_MEM && c->rip_relative)
		memop.addr.mem += c->eip;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		memop.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		memop.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		memop.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
	srcmem_common:
		c->src = memop;
		break;
	case SrcImm:
	case SrcImmU:
		c->src.type = OP_IMM;
		c->src.addr.mem = c->eip;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->src.bytes == 8)
			c->src.bytes = 4;
		/* NB. Immediates are sign-extended as necessary. */
		switch (c->src.bytes) {
		case 1:
			c->src.val = insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->src.val = insn_fetch(s16, 2, c->eip);
			break;
		case 4:
			c->src.val = insn_fetch(s32, 4, c->eip);
			break;
		}
		if ((c->d & SrcMask) == SrcImmU) {
			switch (c->src.bytes) {
			case 1:
				c->src.val &= 0xff;
				break;
			case 2:
				c->src.val &= 0xffff;
				break;
			case 4:
				c->src.val &= 0xffffffff;
				break;
			}
		}
		break;
	case SrcImmByte:
	case SrcImmUByte:
		c->src.type = OP_IMM;
		c->src.addr.mem = c->eip;
		c->src.bytes = 1;
		if ((c->d & SrcMask) == SrcImmByte)
			c->src.val = insn_fetch(s8, 1, c->eip);
		else
			c->src.val = insn_fetch(u8, 1, c->eip);
		break;
	case SrcAcc:
		c->src.type = OP_REG;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->src);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.mem =
			register_address(c,  seg_override_base(ctxt, ops, c),
					 c->regs[VCPU_REGS_RSI]);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.addr.mem = c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		memop.bytes = c->op_bytes + 2;
		goto srcmem_common;
		break;
	}

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* CL is the low byte of RCX */
		break;
	case Src2ImmByte:
		c->src2.type = OP_IMM;
		c->src2.addr.mem = c->eip;
		c->src2.bytes = 1;
		c->src2.val = insn_fetch(u8, 1, c->eip);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	}

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstImmUByte:
		c->dst.type = OP_IMM;
		c->dst.addr.mem = c->eip;
		c->dst.bytes = 1;
		c->dst.val = insn_fetch(u8, 1, c->eip);
		break;
	case DstMem:
	case DstMem64:
		c->dst = memop;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->d & BitOp)
			fetch_bit_operand(c);
		c->dst.orig_val = c->dst.val;
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->dst);
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.mem =
			register_address(c, es_base(ctxt, ops),
					 c->regs[VCPU_REGS_RDI]);
		c->dst.val = 0;
		break;
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
	default:
		c->dst.type = OP_NONE; /* Disable writeback. */
		return 0;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}

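/*
 * Execute a previously decoded instruction: apply mode/LOCK/privilege
 * checks, fetch memory operands, dispatch either through ->execute or
 * the big opcode switch below, then write back results and handle REP
 * string restarts.
 */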
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;
	int irq; /* Used for int 3, int, and into */

	ctxt->decode.mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		emulate_ud(ctxt);
		goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		emulate_gp(ctxt, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		ctxt->restart = true;
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->restart = false;
			ctxt->eip = c->eip;
			goto done;
		}
	}

	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
		rc = read_emulated(ctxt, ops, c->src.addr.mem,
					c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val64 = c->src.val64;
	}

	if (c->src2.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, c->src2.addr.mem,
					&c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = read_emulated(ctxt, ops, c->dst.addr.mem,
				   &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;

special_insn:

	if (c->execute) {
		rc = c->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
	case 0x00 ... 0x05:
	      add:		/* add */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		break;
	case 0x06:		/* push es */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x07:		/* pop es */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x08 ... 0x0d:
	      or:		/* or */
		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
		break;
	case 0x0e:		/* push cs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
		break;
	case 0x10 ... 0x15:
	      adc:		/* adc */
		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
		break;
	case 0x16:		/* push ss */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x17:		/* pop ss */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x18 ... 0x1d:
	      sbb:		/* sbb */
		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
		break;
	case 0x1e:		/* push ds */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x1f:		/* pop ds */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x20 ... 0x25:
	      and:		/* and */
		emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
		break;
	case 0x28 ... 0x2d:
	      sub:		/* sub */
		emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
		break;
	case 0x30 ... 0x35:
	      xor:		/* xor */
		emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
		break;
	case 0x38 ... 0x3d:
	      cmp:		/* cmp */
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		break;
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 0x58 ... 0x5f: /* pop reg */
	pop_instruction:
		rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x60:	/* pusha */
		rc = emulate_pusha(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x61:	/* popa */
		rc = emulate_popa(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		c->dst.val = (s32) c->src.val;
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		c->src.val = c->regs[VCPU_REGS_RDX];
		goto do_io_in;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		c->dst.val = c->regs[VCPU_REGS_RDX];
		goto do_io_out;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
	test:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		c->src.val = c->dst.val;
		write_register_operand(&c->src);
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.orig_val;
		c->lock_prefix = 1;
		break;
	case 0x88 ... 0x8b:	/* mov */
		goto mov;
	case 0x8c:  /* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->src.addr.mem;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			emulate_ud(ctxt);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
			break;
		goto xchg;
	case 0x98: /* cbw/cwde/cdqe */
		switch (c->op_bytes) {
		case 2: c->dst.val = (s8)c->dst.val; break;
		case 4: c->dst.val = (s16)c->dst.val; break;
		case 8: c->dst.val = (s32)c->dst.val; break;
		}
		break;
	case 0x9c: /* pushf */
		c->src.val =  (unsigned long) ctxt->eflags;
		emulate_push(ctxt, ops);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa3:	/* mov */
	case 0xa4 ... 0xa5:	/* movs */
		goto mov;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.addr.mem, c->dst.addr.mem);
		goto cmp;
	case 0xa8 ... 0xa9:	/* test ax, imm */
		goto test;
	case 0xaa ... 0xab:	/* stos */
	case 0xac ... 0xad:	/* lods */
		goto mov;
	case 0xae ... 0xaf:	/* scas */
		goto cmp;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc0 ... 0xc1:
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
	mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xcc:		/* int3 */
		irq = 3;
		goto do_interrupt;
	case 0xcd:		/* int n */
		irq = c->src.val;
	do_interrupt:
		rc = emulate_int(ctxt, ops, irq);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF) {
			irq = 4;
			goto do_interrupt;
		}
		break;
	case 0xcf:		/* iret */
		rc = emulate_iret(ctxt, ops);

		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe0 ... 0xe2:	/* loop/loopz/loopnz */
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
		    (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
			jmp_rel(c, c->src.val);
		break;
	case 0xe4: 	/* inb */
	case 0xe5: 	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->dst.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->src.bytes = min(c->src.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->dst.val,
					  c->src.bytes)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		ops->pio_out_emulated(c->src.bytes, c->dst.val,
				      &c->src.val, 1, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		if (emulate_grp3(ctxt, ops) != X86EMUL_CONTINUE)
			goto cannot_emulate;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else
			ctxt->eflags &= ~X86_EFLAGS_IF;
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	default:
		goto cannot_emulate;
	}
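	/*
	 * Writeback phase: commit the destination operand, then advance
	 * RSI/RDI and RCX for string instructions and decide whether a
	 * REP-prefixed instruction must restart without re-decoding.
	 */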

writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *rc = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		/* The second termination condition only applies for REPE
		 * and REPNE. Test if the repeat string operation prefix is
		 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
		 * corresponding termination condition according to:
		 * 	- if REPE/REPZ and ZF = 0 then done
		 * 	- if REPNE/REPNZ and ZF = 1 then done
		 */
		if (((c->b == 0xa6) || (c->b == 0xa7) ||
		     (c->b == 0xae) || (c->b == 0xaf))
		    && (((c->rep_prefix == REPE_PREFIX) &&
			 ((ctxt->eflags & EFLG_ZF) == 0))
			|| ((c->rep_prefix == REPNE_PREFIX) &&
			    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
			ctxt->restart = false;
		/*
		 * Re-enter guest when pio read ahead buffer is empty or,
		 * if it is not used, after each 1024 iteration.
		 */
		else if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) ||
			 (rc->end != 0 && rc->end == rc->pos)) {
			ctxt->restart = false;
			c->eip = ctxt->eip;
		}
	}
	/*
	 * reset read cache here in case string instruction is restarted
	 * without decoding
	 */
	ctxt->decode.mem_read.end = 0;
	if (!ctxt->restart)
		ctxt->eip = c->eip;

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;

twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.addr.mem,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.addr.mem,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			emulate_ud(ctxt);
			goto done;
		case 7: /* invlpg*/
			emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05: 		/* syscall */
		rc = emulate_syscall(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06:
		emulate_clts(ctxt->vcpu);
		break;
	case 0x09:		/* wbinvd */
		kvm_emulate_wbinvd(ctxt->vcpu);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}
		ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			goto done;
		}

		if (ops->set_dr(c->modrm_reg, c->src.val &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		c->dst.val = test_cc(c->b, ctxt->eflags);
		break;
	case 0xa0:	  /* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	      bts:		/* bts */
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb3:
	      btr:		/* btr */
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
						       : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
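	/*
	 * bsf/bsr are delegated to the host CPU via inline asm; the
	 * host's ZF is then propagated into the guest's eflags, and
	 * writeback is suppressed when the source was zero.
	 */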
	case 0xbc: {		/* bsf */
		u8 zf;
		__asm__ ("bsf %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbd: {		/* bsr */
		u8 zf;
		__asm__ ("bsr %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
							(s16) c->src.val;
		break;
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		/* Write back the register source. */
		c->src.val = c->dst.orig_val;
		write_register_operand(&c->src);
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
							(u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	default:
		goto cannot_emulate;
	}
	goto writeback;

cannot_emulate:
	DPRINTF("Cannot emulate %02x\n", c->b);
	return -1;
}