/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)      /* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)      /* Immediate operand, unsigned */
#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<4)	/* Source is far address in memory */
#define SrcAcc      (0xd<<4)	/* Source Accumulator */
#define SrcImmU16   (0xe<<4)    /* Immediate operand, unsigned, 16 bits */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)      /* Memory operand is absolute displacement */
#define String      (1<<12)     /* String instruction (rep capable) */
#define Stack       (1<<13)     /* Stack instruction (push/pop) */
#define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (1<<16)     /* Instruction varies with 66/f2/f3 prefix */
#define Sse         (1<<17)     /* SSE Vector instruction */
/* Misc flags */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm     (4<<29)
#define Src2Mask    (7<<29)

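/*
 * Illustrative example (not from the original source): a hypothetical
 * decode entry for "xchg r/m8, reg8" could combine the bits above as
 *
 *	ByteOp | DstMem | SrcReg | ModRM | Lock
 *
 * i.e. 8-bit operands, a memory destination, a register source, a ModRM
 * byte to decode, and permission for the LOCK prefix. The Dst, Src and
 * Src2 fields occupy disjoint bit ranges, so flags combine with '|'.
 */
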
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
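
/*
 * Illustrative note (not from the original source): these repetition
 * macros fill runs of identical opcode-table entries; X16(entry)
 * expands to sixteen copies of "entry", X5(entry) to five, and so on.
 */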

struct opcode {
	u32 flags;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
		struct gprefix *gprefix;
	} u;
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "                                  \
	"push %"_tmp"; "                                                \
	"push %"_tmp"; "                                                \
	"movl %"_msk",%"_LO32 _tmp"; "                                  \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"pushf; "                                                       \
	"notl %"_LO32 _tmp"; "                                          \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "                                                \
	"orl  %"_LO32 _tmp",("_STK"); "                                 \
	"popf; "                                                        \
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)


/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ((_dst).bytes) {				             \
		case 1:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)                      \
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)                      \
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)               \
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")

/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) 	\
	do {									\
		unsigned long _tmp;						\
		_type _clv  = (_cl).val;  					\
		_type _srcv = (_src).val;    					\
		_type _dstv = (_dst).val;					\
										\
		__asm__ __volatile__ (						\
			_PRE_EFLAGS("0", "5", "2")				\
			_op _suffix " %4,%1 \n"					\
			_POST_EFLAGS("0", "5", "2")				\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)		\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)		\
			); 							\
										\
		(_cl).val  = (unsigned long) _clv;				\
		(_src).val = (unsigned long) _srcv;				\
		(_dst).val = (unsigned long) _dstv;				\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)				\
	do {									\
		switch ((_dst).bytes) {						\
		case 2:								\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,  	\
						"w", unsigned short);         	\
			break;							\
		case 4: 							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,  	\
						"l", unsigned int);           	\
			break;							\
		case 8:								\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
						"q", unsigned long));  		\
			break;							\
		}								\
	} while (0)

#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)                                    \
	do {								\
		switch ((_dst).bytes) {				        \
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)

#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)		\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "1")			\
			_op _suffix " %5; "				\
			_POST_EFLAGS("0", "4", "1")			\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx)			\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)

#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "1")			\
			"1: \n\t"					\
			_op _suffix " %6; "				\
			"2: \n\t"					\
			_POST_EFLAGS("0", "5", "1")			\
			".pushsection .fixup,\"ax\" \n\t"		\
			"3: movb $1, %4 \n\t"				\
			"jmp 2b \n\t"					\
			".popsection \n\t"				\
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)

/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)			\
	do {									\
		switch((_src).bytes) {						\
		case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
		case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx,  _eflags, "w"); break; \
		case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
		case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
		}							\
	} while (0)

#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex)	\
	do {								\
		switch((_src).bytes) {					\
		case 1:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx,	\
						 _eflags, "b", _ex);	\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "w", _ex);	\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "l", _ex);	\
			break;						\
		case 8: ON64(						\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "q", _ex));	\
			break;						\
		}							\
	} while (0)

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)                                  \
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})
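
/*
 * Illustrative usage (assumed, mirroring how the decoder consumes this
 * macro): fetching a one-byte opcode and a 32-bit immediate might look
 * like
 *
 *	c->b = insn_fetch(u8, 1, c->eip);
 *	c->src.val = insn_fetch(u32, 4, c->eip);
 *
 * Note the hidden control flow: on a failed fetch the macro jumps to a
 * "done" label, which the enclosing function must provide.
 */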

#define insn_fetch_arr(_arr, _size, _eip)                                \
({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
})

static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}
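
/*
 * Worked example (illustrative): with 16-bit addressing, ad_bytes == 2,
 * so ad_mask() returns (1UL << 16) - 1 == 0xffff; with ad_bytes == 4 it
 * returns 0xffffffff. The 8-byte case never reaches this helper (the
 * shift would be undefined); address_mask() below short-circuits it.
 */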

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long reg)
{
	return address_mask(c, reg);
}

static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}
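
/*
 * Worked example (illustrative): with ad_bytes == 2, *reg == 0x1ffff
 * and inc == 1, the result is (0x1ffff & ~0xffff) | (0x20000 & 0xffff)
 * == 0x10000: the low 16 bits wrap while the high bits are preserved,
 * matching 16-bit address arithmetic.
 */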

static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops,
			     struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return c->seg_override;
}

static ulong linear(struct x86_emulate_ctxt *ctxt,
		    struct segmented_address addr)
{
	struct decode_cache *c = &ctxt->decode;
	ulong la;

	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
	if (c->ad_bytes != 8)
		la &= (u32)-1;
	return la;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long eip, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size, cur_size;

	if (eip == fc->end) {
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
				size, ctxt->vcpu, &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->end += size;
	}
	*dest = fc->data[eip - fc->start];
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
			   ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
			   ctxt->vcpu, &ctxt->exception);
	return rc;
}

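/*
 * Illustrative example (not from the original source): with no REX
 * prefix, modrm_reg == 5 and highbyte_regs set selects CH, i.e. byte 1
 * of the RCX slot (5 & 3 == 1); with a REX prefix (highbyte_regs == 0)
 * the same encoding selects BPL, the low byte of RBP.
 */
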
static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}

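/*
 * Worked example (illustrative): for "jne" the condition nibble is 5.
 * (5 & 15) >> 1 == 2 selects the ZF test, and because the low bit is
 * set the sense is inverted, so test_cc(5, flags) is true iff ZF == 0.
 */
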
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);

	if (c->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}

	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops,
			struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_seg = VCPU_SREG_DS;

	if (c->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		op->addr.reg = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		if (c->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = c->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			c->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}

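/*
 * Worked example (illustrative): in 16-bit addressing, the ModRM byte
 * 0x42 decodes as mod == 1, reg == 0, rm == 2, i.e. an effective
 * address of BP + SI + disp8, with SS as the default segment because
 * the base register is BP.
 */
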
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops,
		      struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (c->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct decode_cache *c)
{
	long sv = 0, mask;

	if (c->dst.type == OP_MEM && c->src.type == OP_REG) {
		mask = ~(c->dst.bytes * 8 - 1);

		if (c->src.bytes == 2)
			sv = (s16)c->src.val & (s16)mask;
		else if (c->src.bytes == 4)
			sv = (s32)c->src.val & (s32)mask;

		c->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	c->src.val &= (c->dst.bytes << 3) - 1;
}

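/*
 * Worked example (illustrative): for "bt [mem], reg" with 16-bit
 * operands and a bit index of 35, mask is ~15, so sv == 32 and the
 * effective address advances by 32 >> 3 == 4 bytes, while the in-word
 * bit index is reduced to 35 & 15 == 3.
 */
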
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->decode.mem_read;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ops->read_emulated(addr, mc->data + mc->end, n,
					&ctxt->exception, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->decode.io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		struct decode_cache *c = &ctxt->decode;
		unsigned int in_page, n;
		unsigned int count = c->rep_prefix ?
			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(c->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
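
/*
 * Illustrative note (not from the original source): for "rep insw" with
 * RCX == 100 and 0x800 bytes left in the destination page, the refill
 * above reads min(0x800, sizeof(rc->data)) / 2 units, capped at 100, in
 * one round trip; later iterations are then served from rc->data
 * without another exit to the I/O backend.
 */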

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     struct x86_emulate_ops *ops,
				     u16 selector, struct desc_ptr *dt)
{
	if (selector & 1 << 2) {
		struct desc_struct desc;
		memset(dt, 0, sizeof *dt);
		if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
						ctxt->vcpu))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(dt, ctxt->vcpu);
}

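/*
 * Worked example (illustrative): with the granularity bit set and a raw
 * limit of 0xfffff, desc_limit_scaled() returns (0xfffff << 12) | 0xfff
 * == 0xffffffff, a 4GiB segment; with g clear the limit is in bytes and
 * is returned unchanged.
 */
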
/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	int ret;
	ulong addr;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
	addr = dt.address + index * 8;
	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
			    &ctxt->exception);

	return ret;
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops *ops,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;
	int ret;

	get_descriptor_table_ptr(ctxt, ops, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
			     &ctxt->exception);

	return ret;
}

/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ops->cpl(ctxt->vcpu);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment, or segment
		 * selector's RPL != CPL, or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ops->set_segment_selector(selector, seg, ctxt->vcpu);
	ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;

	switch (c->dst.type) {
	case OP_REG:
		write_register_operand(&c->dst);
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					linear(ctxt, c->dst.addr.mem),
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					&ctxt->exception,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					linear(ctxt, c->dst.addr.mem),
					&c->dst.val,
					c->dst.bytes,
					&ctxt->exception,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
	c->dst.addr.mem.seg = VCPU_SREG_SS;
}

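/*
 * Illustrative note (not from the original source): emulate_push() only
 * stages the store by pointing c->dst at SS:RSP after decrementing RSP;
 * the memory write itself happens later in writeback(), which is why
 * the multi-push helpers below call writeback() once per pushed value.
 */
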
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

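/*
 * Illustrative example (not from the original source): at CPL 3 with
 * IOPL 0 in protected mode, neither EFLG_IOPL nor EFLG_IF is added to
 * change_mask above, so a popped value that tries to set IF is silently
 * ignored rather than faulting, matching hardware POPF behaviour.
 */
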
static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;

	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);

	emulate_push(ctxt, ops);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
	return rc;
}

static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt, ops);

		rc = writeback(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	/* Disable writeback. */
	c->dst.type = OP_NONE;

	return rc;
}

static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops, int irq)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	c->src.val = ctxt->eflags;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = c->eip;
	emulate_push(ctxt, ops);
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	ops->get_idt(&dt, ctxt->vcpu);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = eip;

	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, ops, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not yet implemented */
		return X86EMUL_UNHANDLEABLE;
	}
}

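/*
 * Worked example (illustrative): for irq == 8, the real-mode IVT entry
 * sits at offset 8 * 4 == 32, so eip is read from dt.address + 32 and
 * cs from dt.address + 34, each 16 bits wide, before the far transfer.
 */
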
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->eip = temp_eip;

	if (c->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (c->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
				    struct x86_emulate_ops* ops)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt, ops);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not yet implemented */
		return X86EMUL_UNHANDLEABLE;
	}
}

static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}

static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}

static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long *rax = &c->regs[VCPU_REGS_RAX];
	unsigned long *rdx = &c->regs[VCPU_REGS_RDX];
	u8 de = 0;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	case 4: /* mul */
		emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 5: /* imul */
		emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags);
		break;
	case 6: /* div */
		emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	case 7: /* idiv */
		emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	default:
		return X86EMUL_UNHANDLEABLE;
	}
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

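/*
 * Illustrative note (not from the original source): "de" is set by the
 * exception-fixup path of emulate_1op_rax_rdx_ex() when the div/idiv
 * instruction faults (divide by zero or quotient overflow), and is
 * converted here into a guest #DE instead of an unhandled host fault.
 */
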
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt, ops);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt, ops);
		break;
	}
	return X86EMUL_CONTINUE;
}

static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old = c->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
			(u32) c->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

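/*
 * Illustrative note (not from the original source): this implements
 * cmpxchg8b semantics: if EDX:EAX matches the original 64-bit memory
 * value, ZF is set and ECX:EBX is staged for writeback; otherwise ZF is
 * cleared and the current memory value is loaded into EDX:EAX.
 */
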
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned short sel;
	int rc;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, ops, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.val = c->src.val;
	return rc;
}

static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops, struct desc_struct *cs,
			struct desc_struct *ss)
{
	memset(cs, 0, sizeof(struct desc_struct));
	ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}

static int
emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt->vcpu,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

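/*
 * Illustrative note (not from the original source): MSR_STAR carries
 * the syscall CS/SS selector base in bits 47:32, so after the shift
 * above a STAR value of 0x0023001000000000 yields cs_sel == 0x0010 and
 * ss_sel == 0x0018.
 */
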
static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
		|| is_long_mode(ctxt->vcpu)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

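/*
 * SYSEXIT: a REX.W prefix selects a 64-bit return.  The user CS/SS
 * selectors are fixed offsets from MSR_IA32_SYSENTER_CS, and the
 * return EIP/ESP are taken from RDX/RCX.
 */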
static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel, ss_sel;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, ops, &cs, &ss);

	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);

	c->eip = c->regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}

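/*
 * Consult the I/O permission bitmap in the TSS: the bitmap offset is
 * read from TSS word 102, then the two bytes covering the port are
 * fetched; access is allowed only if every bit for port..port+len-1
 * is clear.
 */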
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 2, ctxt->vcpu,
			  NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

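/*
 * Helpers for 16-bit task switches: dump the current register and
 * segment state into a struct tss_segment_16 image and restore state
 * from one, loading selectors before descriptors as the SDM requires.
 */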
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->ip = c->eip;
	tss->flag = ctxt->eflags;
	tss->ax = c->regs[VCPU_REGS_RAX];
	tss->cx = c->regs[VCPU_REGS_RCX];
	tss->dx = c->regs[VCPU_REGS_RDX];
	tss->bx = c->regs[VCPU_REGS_RBX];
	tss->sp = c->regs[VCPU_REGS_RSP];
	tss->bp = c->regs[VCPU_REGS_RBP];
	tss->si = c->regs[VCPU_REGS_RSI];
	tss->di = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_16 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	c->eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	c->regs[VCPU_REGS_RAX] = tss->ax;
	c->regs[VCPU_REGS_RCX] = tss->cx;
	c->regs[VCPU_REGS_RDX] = tss->dx;
	c->regs[VCPU_REGS_RBX] = tss->bx;
	c->regs[VCPU_REGS_RSP] = tss->sp;
	c->regs[VCPU_REGS_RBP] = tss->bp;
	c->regs[VCPU_REGS_RSI] = tss->si;
	c->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);

	/*
	 * Now load the segment descriptors. If a fault happens at this
	 * stage it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, ops, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops,
				struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;

	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
	tss->eip = c->eip;
	tss->eflags = ctxt->eflags;
	tss->eax = c->regs[VCPU_REGS_RAX];
	tss->ecx = c->regs[VCPU_REGS_RCX];
	tss->edx = c->regs[VCPU_REGS_RDX];
	tss->ebx = c->regs[VCPU_REGS_RBX];
	tss->esp = c->regs[VCPU_REGS_RSP];
	tss->ebp = c->regs[VCPU_REGS_RBP];
	tss->esi = c->regs[VCPU_REGS_RSI];
	tss->edi = c->regs[VCPU_REGS_RDI];

	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 struct tss_segment_32 *tss)
{
	struct decode_cache *c = &ctxt->decode;
	int ret;

	if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
		return emulate_gp(ctxt, 0);
	c->eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	c->regs[VCPU_REGS_RAX] = tss->eax;
	c->regs[VCPU_REGS_RCX] = tss->ecx;
	c->regs[VCPU_REGS_RDX] = tss->edx;
	c->regs[VCPU_REGS_RBX] = tss->ebx;
	c->regs[VCPU_REGS_RSP] = tss->esp;
	c->regs[VCPU_REGS_RBP] = tss->ebp;
	c->regs[VCPU_REGS_RSI] = tss->esi;
	c->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);

	/*
	 * Now load the segment descriptors. If a fault happens at this
	 * stage it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  struct x86_emulate_ops *ops,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, ops, &tss_seg);

	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     ctxt->vcpu, &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, ops, &tss_seg);
}

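/*
 * Common task-switch driver: validate the target TSS descriptor,
 * adjust the busy bit and EFLAGS.NT according to the switch reason,
 * dispatch to the 16- or 32-bit handler, and finally push the error
 * code on the new task's stack when one was supplied.
 */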
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   struct x86_emulate_ops *ops,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
	ulong old_tss_base =
		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
			return emulate_gp(ctxt, 0);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, ops, old_tss_sel,
					 &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is
	 * set in EFLAGS; note that old_tss_sel is not used after this
	 * point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, ops, tss_selector,
					 &next_tss_desc);
	}

	ops->set_cr(0,  ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
	ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu);
	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);

	if (has_error_code) {
		struct decode_cache *c = &ctxt->decode;

		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		c->lock_prefix = 0;
		c->src.val = (unsigned long) error_code;
		emulate_push(ctxt, ops);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->eip = ctxt->eip;
	c->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		rc = writeback(ctxt, ops);
		if (rc == X86EMUL_CONTINUE)
			ctxt->eip = c->eip;
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}

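/*
 * Advance (E)SI or (E)DI after a string operation, moving backwards
 * when EFLAGS.DF is set, and refresh the operand's effective address.
 */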
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	struct decode_cache *c = &ctxt->decode;
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(c, &c->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(c, c->regs[reg]);
	op->addr.mem.seg = seg;
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	emulate_push(ctxt, ctxt->ops);
	return X86EMUL_CONTINUE;
}

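/*
 * DAS decimal-adjusts AL after a subtraction: 6 is subtracted from
 * the low nibble when it exceeds 9 (or AF is set), 0x60 from AL when
 * the original value exceeded 0x99 (or CF was set), and CF/AF are
 * updated to match.
 */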
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = c->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	c->dst.val = al;
	/* Set PF, ZF, SF */
	c->src.type = OP_IMM;
	c->src.val = 0;
	c->src.bytes = 1;
	emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

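/*
 * Far CALL: load the new CS first, then push the old CS and old EIP
 * with an explicit writeback after each push so that a fault leaves
 * the stack consistent.
 */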
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
	old_eip = c->eip;

	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
	if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	c->eip = 0;
	memcpy(&c->eip, c->src.valptr, c->op_bytes);

	c->src.val = old_cs;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->src.val = old_eip;
	emulate_push(ctxt, ctxt->ops);
	rc = writeback(ctxt, ctxt->ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	c->dst.type = OP_NONE;

	return X86EMUL_CONTINUE;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	c->dst.type = OP_REG;
	c->dst.addr.reg = &c->eip;
	c->dst.bytes = c->op_bytes;
	rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val);
	return X86EMUL_CONTINUE;
}

static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags);
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.val = c->src2.val;
	return em_imul(ctxt);
}

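/*
 * CWD/CDQ/CQO: replicate the sign bit of the accumulator into every
 * bit of DX/EDX/RDX.
 */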
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type = OP_REG;
	c->dst.bytes = c->src.bytes;
	c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
	c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	unsigned cpl = ctxt->ops->cpl(ctxt->vcpu);
	struct decode_cache *c = &ctxt->decode;
	u64 tsc = 0;

	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD))
		return emulate_gp(ctxt, 0);
	ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
	c->regs[VCPU_REGS_RAX] = (u32)tsc;
	c->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	c->dst.val = c->src.val;
	return X86EMUL_CONTINUE;
}

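/*
 * Shorthand for building the opcode tables below: D() supplies decode
 * flags only, N marks an undefined entry, G()/GD() attach group
 * (dual) tables, and I() attaches an execution callback.
 */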
#define D(_y) { .flags = (_y) }
#define N    D(0)
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)

#define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM),			\
		D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock),		\
		D2bv(((_f) & ~Lock) | DstAcc | SrcImm)


static struct opcode group1[] = {
	X7(D(Lock)), N
};

static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	X4(D(SrcMem | ModRM)),
};

static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};

static struct group_dual group7 = { {
	N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv),
	D(SrcMem | ModRM | ByteOp | Priv | NoAccess),
}, {
	D(SrcNone | ModRM | Priv | VendorSpecific), N,
	N, D(SrcNone | ModRM | Priv | VendorSpecific),
	D(SrcNone | ModRM | DstMem | Mov), N,
	D(SrcMem16 | ModRM | Mov | Priv), N,
} };

static struct opcode group8[] = {
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
};

static struct group_dual group9 = { {
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static struct opcode group11[] = {
	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
};

static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	D6ALU(Lock),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	D6ALU(Lock), N, N,
	/* 0x28 - 0x2F */
	D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	D6ALU(Lock), N, N,
	/* 0x38 - 0x3F */
	D6ALU(0), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(D(DstReg | Stack)),
	/* 0x60 - 0x67 */
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	D2bv(DstDI | Mov | String), /* insb, insw/insd */
	D2bv(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg),
	D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A),
	/* 0x90 - 0x97 */
	X8(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N,
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	D2bv(SrcSI | DstDI | String),
	/* 0xA8 - 0xAF */
	D2bv(DstAcc | SrcImm),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	D2bv(SrcAcc | DstDI | String),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	D(ImplicitOps | Stack),
	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	N, N, N, D(ImplicitOps | Stack),
	D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X4(D(SrcImmByte)),
	D2bv(SrcImmUByte | DstAcc), D2bv(SrcAcc | DstImmUByte),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
	D2bv(SrcNone | DstAcc),	D2bv(SrcAcc | ImplicitOps),
	/* 0xF0 - 0xF7 */
	N, N, N, N,
	D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	N, GD(0, &group7), N, N,
	N, D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv), N,
	D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264),
	D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc),
	D(ImplicitOps | Priv), N,
	D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x70 - 0x7F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	N, D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

#undef D
#undef N
#undef G
#undef GD
#undef I

#undef D2bv
#undef I2bv
#undef D6ALU

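/*
 * Immediates wider than four bytes do not exist: with a 64-bit
 * operand size a four-byte immediate is fetched and later
 * sign-extended by the consumer.
 */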
static unsigned imm_size(struct decode_cache *c)
{
	unsigned size;

	size = (c->d & ByteOp) ? 1 : c->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	struct decode_cache *c = &ctxt->decode;
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = c->eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, 1, c->eip);
		break;
	case 2:
		op->val = insn_fetch(s16, 2, c->eip);
		break;
	case 4:
		op->val = insn_fetch(s32, 4, c->eip);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

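/*
 * Decode one instruction: prime the fetch cache from 'insn', consume
 * legacy and REX prefixes, look up the opcode (including group and
 * SIMD-prefix tables), then decode the source, second-source and
 * destination operands described by the opcode flags.
 */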
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, dual, goffset, simd_prefix;
	bool op_prefix = false;
	struct opcode opcode, *g_mod012, *g_mod3;
	struct operand memop = { .type = OP_NONE };

	c->eip = ctxt->eip;
	c->fetch.start = c->eip;
	c->fetch.end = c->fetch.start + insn_len;
	if (insn_len > 0)
		memcpy(c->fetch.data, insn, insn_len);
	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = c->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix & 8)
		c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[c->b];
	/* Two-byte opcode? */
	if (c->b == 0x0f) {
		c->twobyte = 1;
		c->b = insn_fetch(u8, 1, c->eip);
		opcode = twobyte_table[c->b];
	}
	c->d = opcode.flags;

	if (c->d & Group) {
		dual = c->d & GroupDual;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		if (c->d & GroupDual) {
			g_mod012 = opcode.u.gdual->mod012;
			g_mod3 = opcode.u.gdual->mod3;
		} else
			g_mod012 = g_mod3 = opcode.u.group;

		c->d &= ~(Group | GroupDual);

		goffset = (c->modrm >> 3) & 7;

		if ((c->modrm >> 6) == 3)
			opcode = g_mod3[goffset];
		else
			opcode = g_mod012[goffset];
		c->d |= opcode.flags;
	}

	if (c->d & Prefix) {
		if (c->rep_prefix && op_prefix)
			return X86EMUL_UNHANDLEABLE;
		simd_prefix = op_prefix ? 0x66 : c->rep_prefix;
		switch (simd_prefix) {
		case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
		case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
		case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
		case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
		}
		c->d |= opcode.flags;
	}

	c->execute = opcode.u.execute;

	/* Unrecognised? */
	if (c->d == 0 || (c->d & Undefined))
		return -1;

	if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
		return -1;

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	if (c->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			c->op_bytes = 8;
		else
			c->op_bytes = 4;
	}

	if (c->d & Sse)
		c->op_bytes = 16;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM) {
		rc = decode_modrm(ctxt, ops, &memop);
		if (!c->has_seg_override)
			set_seg_override(c, c->modrm_seg);
	} else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops, &memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	memop.addr.mem.seg = seg_override(ctxt, ops, c);

	if (memop.type == OP_MEM && c->ad_bytes != 8)
		memop.addr.mem.ea = (u32)memop.addr.mem.ea;

	if (memop.type == OP_MEM && c->rip_relative)
		memop.addr.mem.ea += c->eip;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(ctxt, &c->src, c, 0);
		break;
	case SrcMem16:
		memop.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		memop.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		memop.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
	srcmem_common:
		c->src = memop;
		break;
	case SrcImmU16:
		rc = decode_imm(ctxt, &c->src, 2, false);
		break;
	case SrcImm:
		rc = decode_imm(ctxt, &c->src, imm_size(c), true);
		break;
	case SrcImmU:
		rc = decode_imm(ctxt, &c->src, imm_size(c), false);
		break;
	case SrcImmByte:
		rc = decode_imm(ctxt, &c->src, 1, true);
		break;
	case SrcImmUByte:
		rc = decode_imm(ctxt, &c->src, 1, false);
		break;
	case SrcAcc:
		c->src.type = OP_REG;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->src);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	case SrcSI:
		c->src.type = OP_MEM;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.addr.mem.ea =
			register_address(c, c->regs[VCPU_REGS_RSI]);
		c->src.addr.mem.seg = seg_override(ctxt, ops, c);
		c->src.val = 0;
		break;
	case SrcImmFAddr:
		c->src.type = OP_IMM;
		c->src.addr.mem.ea = c->eip;
		c->src.bytes = c->op_bytes + 2;
		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
		break;
	case SrcMemFAddr:
		memop.bytes = c->op_bytes + 2;
		goto srcmem_common;
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* the count is in CL */
		break;
	case Src2ImmByte:
		rc = decode_imm(ctxt, &c->src2, 1, true);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Imm:
		rc = decode_imm(ctxt, &c->src2, imm_size(c), true);
		break;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case DstReg:
		decode_register_operand(ctxt, &c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstImmUByte:
		c->dst.type = OP_IMM;
		c->dst.addr.mem.ea = c->eip;
		c->dst.bytes = 1;
		c->dst.val = insn_fetch(u8, 1, c->eip);
		break;
	case DstMem:
	case DstMem64:
		c->dst = memop;
		if ((c->d & DstMask) == DstMem64)
			c->dst.bytes = 8;
		else
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->d & BitOp)
			fetch_bit_operand(c);
		c->dst.orig_val = c->dst.val;
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.reg = &c->regs[VCPU_REGS_RAX];
		fetch_register_operand(&c->dst);
		c->dst.orig_val = c->dst.val;
		break;
	case DstDI:
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.addr.mem.ea =
			register_address(c, c->regs[VCPU_REGS_RDI]);
		c->dst.addr.mem.seg = VCPU_SREG_ES;
		c->dst.val = 0;
		break;
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
	default:
		c->dst.type = OP_NONE; /* Disable writeback. */
		return 0;
	}

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	/* The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
	 * corresponding termination condition according to:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((c->b == 0xa6) || (c->b == 0xa7) ||
	     (c->b == 0xae) || (c->b == 0xaf))
	    && (((c->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((c->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

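/*
 * Execute a previously decoded instruction.  Generic checks (LOCK
 * prefix validity, privilege level, SSE availability) run first;
 * opcodes with an ->execute callback are dispatched through it, and
 * the rest fall through to the one- and two-byte switches below.
 */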
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u64 msr_data;
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = c->dst.type;
	int irq; /* Used for int 3, int, and into */

	ctxt->decode.mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & Sse)
	    && ((ops->get_cr(0, ctxt->vcpu) & X86_CR0_EM)
		|| !(ops->get_cr(4, ctxt->vcpu) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((c->d & Sse) && (ops->get_cr(0, ctxt->vcpu) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

	/* Privileged instructions can be executed only at CPL 0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	if (c->rep_prefix && (c->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->eip = c->eip;
			goto done;
		}
	}

	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
		rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
					c->src.valptr, c->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val64 = c->src.val64;
	}

	if (c->src2.type == OP_MEM) {
		rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
					&c->src2.val, c->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;


	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
				   &c->dst.val, c->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	c->dst.orig_val = c->dst.val;

special_insn:

	if (c->execute) {
		rc = c->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
	case 0x00 ... 0x05:
	      add:		/* add */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		break;
	case 0x06:		/* push es */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x07:		/* pop es */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0x08 ... 0x0d:
	      or:		/* or */
		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
		break;
	case 0x0e:		/* push cs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
		break;
	case 0x10 ... 0x15:
	      adc:		/* adc */
		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
		break;
	case 0x16:		/* push ss */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x17:		/* pop ss */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0x18 ... 0x1d:
	      sbb:		/* sbb */
		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
		break;
	case 0x1e:		/* push ds */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x1f:		/* pop ds */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0x20 ... 0x25:
	      and:		/* and */
		emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
		break;
	case 0x28 ... 0x2d:
	      sub:		/* sub */
		emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
		break;
	case 0x30 ... 0x35:
	      xor:		/* xor */
		emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
		break;
	case 0x38 ... 0x3d:
	      cmp:		/* cmp */
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		break;
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 0x58 ... 0x5f: /* pop reg */
	pop_instruction:
		rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
		break;
	case 0x60:	/* pusha */
		rc = emulate_pusha(ctxt, ops);
		break;
	case 0x61:	/* popa */
		rc = emulate_popa(ctxt, ops);
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		c->dst.val = (s32) c->src.val;
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		c->src.val = c->regs[VCPU_REGS_RDX];
		goto do_io_in;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		c->dst.val = c->regs[VCPU_REGS_RDX];
		goto do_io_out;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
	test:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		c->src.val = c->dst.val;
		write_register_operand(&c->src);
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.orig_val;
		c->lock_prefix = 1;
		break;
	case 0x8c:  /* mov r/m, sreg */
		if (c->modrm_reg > VCPU_SREG_GS) {
			rc = emulate_ud(ctxt);
			goto done;
		}
		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
		break;
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->src.addr.mem.ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (c->modrm_reg == VCPU_SREG_SS)
			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX])
			break;
		goto xchg;
	case 0x98: /* cbw/cwde/cdqe */
		switch (c->op_bytes) {
		case 2: c->dst.val = (s8)c->dst.val; break;
		case 4: c->dst.val = (s16)c->dst.val; break;
		case 8: c->dst.val = (s32)c->dst.val; break;
		}
		break;
	case 0x9c: /* pushf */
		c->src.val =  (unsigned long) ctxt->eflags;
		emulate_push(ctxt, ops);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		break;
	case 0xa6 ... 0xa7:	/* cmps */
		c->dst.type = OP_NONE; /* Disable writeback. */
		goto cmp;
	case 0xa8 ... 0xa9:	/* test ax, imm */
		goto test;
	case 0xae ... 0xaf:	/* scas */
		goto cmp;
	case 0xc0 ... 0xc1:
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.addr.reg = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc4:		/* les */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
		break;
	case 0xc5:		/* lds */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		break;
	case 0xcc:		/* int3 */
		irq = 3;
		goto do_interrupt;
	case 0xcd:		/* int n */
		irq = c->src.val;
	do_interrupt:
		rc = emulate_int(ctxt, ops, irq);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF) {
			irq = 4;
			goto do_interrupt;
		}
		break;
	case 0xcf:		/* iret */
		rc = emulate_iret(ctxt, ops);
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe0 ... 0xe2:	/* loop/loopz/loopnz */
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 &&
		    (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags)))
			jmp_rel(c, c->src.val);
		break;
	case 0xe3:	/* jcxz/jecxz/jrcxz */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0)
			jmp_rel(c, c->src.val);
		break;
	case 0xe4: 	/* inb */
	case 0xe5: 	/* in */
		goto do_io_in;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt, ops);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: { /* jmp far */
		unsigned short sel;
	jump_far:
		memcpy(&sel, c->src.valptr + c->op_bytes, 2);

		if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS))
			goto done;

		c->eip = 0;
		memcpy(&c->eip, c->src.valptr, c->op_bytes);
		break;
	}
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		c->dst.bytes = min(c->dst.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		}
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->dst.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		c->src.bytes = min(c->src.bytes, 4u);
		if (!emulator_io_permited(ctxt, ops, c->dst.val,
					  c->src.bytes)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		}
		ops->pio_out_emulated(c->src.bytes, c->dst.val,
				      &c->src.val, 1, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		rc = emulate_grp3(ctxt, ops);
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else
			ctxt->eflags &= ~X86_EFLAGS_IF;
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		} else {
			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
			ctxt->eflags |= X86_EFLAGS_IF;
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	c->dst.type = saved_dst_type;

	if ((c->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override(ctxt, ops, c),
				VCPU_REGS_RSI, &c->src);

	if ((c->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
				&c->dst);

	if (c->rep_prefix && (c->d & String)) {
		struct read_cache *r = &ctxt->decode.io_read;
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iteration.
			 */
			if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->decode.mem_read.end = 0;
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
3540
		}
	}

	ctxt->eip = c->eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (c->b) {
A
Avi Kivity 已提交
3552
	case 0x01: /* lgdt, lidt, lmsw */
3553
		switch (c->modrm_reg) {
A
Avi Kivity 已提交
3554 3555 3556
			u16 size;
			unsigned long address;

3557
		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.addr.mem,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.addr.mem,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
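			/*
			 * Note on lmsw above: only the low four CR0 bits
			 * (PE/MP/EM/TS) come from the source operand, and
			 * since the old PE bit survives the ~0x0e mask
			 * before the OR, lmsw can set PE but never clear
			 * it -- matching the architectural behaviour.
			 */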
		case 5: /* not defined */
			emulate_ud(ctxt);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu,
				       linear(ctxt, c->src.addr.mem));
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05: 		/* syscall */
		rc = emulate_syscall(ctxt, ops);
		break;
	case 0x06:		/* clts */
		emulate_clts(ctxt->vcpu);
		break;
	case 0x09:		/* wbinvd */
		kvm_emulate_wbinvd(ctxt->vcpu);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			emulate_ud(ctxt);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		break;
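		/*
		 * Note on the switch above: only CR0, CR2, CR3, CR4 and CR8
		 * exist architecturally; reading any other control register
		 * number raises #UD, which is why modrm_reg values 1, 5-7
		 * and 9-15 take the emulate_ud() path.
		 */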
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
		break;
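		/*
		 * Note on the CR4.DE test above: DR4 and DR5 are aliases of
		 * DR6 and DR7; when CR4.DE (debugging extensions) is set,
		 * touching the aliases is architecturally #UD, so the same
		 * check guards both the mov-from-dr and mov-to-dr cases.
		 */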
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			emulate_ud(ctxt);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}

		if (ops->set_dr(c->modrm_reg, c->src.val &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}

		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		break;
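		/*
		 * Worked example of the MSR register convention used above:
		 * wrmsr with ECX=0xc0000080 (IA32_EFER), EDX=0 and
		 * EAX=0x500 writes the 64-bit value 0x0000000000000500,
		 * i.e. EDX supplies bits 63:32 and EAX bits 31:0; rdmsr
		 * splits the value back the same way.  A refused access in
		 * either direction raises #GP(0).
		 */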
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt, ops);
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt, ops);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc. */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		c->dst.val = test_cc(c->b, ctxt->eflags);
		break;
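		/*
		 * Note on the three ranges above: cmov, jcc and setcc all
		 * encode the condition in the low nibble of the opcode, so
		 * test_cc() decodes it once for all of them.  Example:
		 * 0x44/0x84/0x94 all test ZF ("e"/equal), while the
		 * 0x45/0x85/0x95 variants test its negation.
		 */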
	case 0xa0:	  /* push fs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
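		/*
		 * Worked example of the mask above: for a 32-bit destination
		 * c->dst.bytes << 3 is 32, so the bit offset is reduced
		 * modulo the operand width -- "bt $35, %eax" tests bit 3.
		 */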
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xab:
	      bts:		/* bts */
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
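		/*
		 * Note on clflush above: it appears to be treated as a
		 * no-op here -- the cache-line flush is a performance hint
		 * with no guest-visible architectural state to emulate.
		 */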
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
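		/*
		 * Worked example of the cmpxchg case above: with EAX=1,
		 * dst=1 and src=2, the compare sets ZF and 2 is written to
		 * the destination; with EAX=1 and dst=3 the compare clears
		 * ZF and writeback is redirected so EAX receives 3 instead.
		 */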
	case 0xb2:		/* lss */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
		break;
	case 0xb3:
	      btr:		/* btr */
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb4:		/* lfs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
		break;
	case 0xb5:		/* lgs */
		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
						       : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
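		/*
		 * Note on Grp8 above: 0f ba encodes bt/bts/btr/btc with an
		 * immediate bit offset, selected by the ModRM reg field
		 * (architecturally values 4-7; masking with 3 here yields
		 * the same dispatch).
		 */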
	case 0xbb:
	      btc:		/* btc */
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbc: {		/* bsf */
		u8 zf;
		__asm__ ("bsf %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbd: {		/* bsr */
		u8 zf;
		__asm__ ("bsr %2, %0; setz %1"
			 : "=r"(c->dst.val), "=q"(zf)
			 : "r"(c->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
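	/*
	 * Note on bsf/bsr above: the host instruction leaves the
	 * destination undefined and sets ZF when the source is zero, so
	 * the setz output is used both to propagate ZF into the guest's
	 * eflags and to suppress writeback of the undefined result.
	 */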
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
							(s16) c->src.val;
		break;
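		/*
		 * Worked example for movzx/movsx above: with a byte source
		 * of 0x80 and a 32-bit destination, movzx yields 0x00000080
		 * while movsx yields 0xffffff80; the (u8)/(u16) versus
		 * (s8)/(s16) casts select zero- versus sign-extension.
		 */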
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		/* Write back the register source. */
		c->src.val = c->dst.orig_val;
		write_register_operand(&c->src);
		break;
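		/*
		 * Worked example of xadd above: with dst=5 and src=2 the
		 * destination becomes 7 and the source register receives
		 * the old destination value 5 -- an exchange combined with
		 * an add, as on hardware.
		 */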
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
							(u64) c->src.val;
		break;
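		/*
		 * Note on movnti above: the non-temporal hint only affects
		 * cache allocation on real hardware, so emulating it as an
		 * ordinary store of the appropriately sized value should be
		 * sufficient here.
		 */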
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops);
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return -1;
}