/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)	/* Destination Accumulator */
#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
#define DstMem64    (6<<1)	/* 64bit memory operand */
#define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
#define DstDX       (8<<1)	/* Destination is in DX register */
#define DstMask     (0xf<<1)
/* Source operand type. */
#define SrcNone     (0<<5)	/* No source operand. */
#define SrcReg      (1<<5)	/* Register operand. */
#define SrcMem      (2<<5)	/* Memory operand. */
#define SrcMem16    (3<<5)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<5)	/* Memory operand (32-bit). */
#define SrcImm      (5<<5)	/* Immediate operand. */
#define SrcImmByte  (6<<5)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<5)	/* Implied '1' */
#define SrcImmUByte (8<<5)      /* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<5)      /* Immediate operand, unsigned */
#define SrcSI       (0xa<<5)	/* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<5)	/* Source is immediate far address */
#define SrcMemFAddr (0xc<<5)	/* Source is far address in memory */
#define SrcAcc      (0xd<<5)	/* Source Accumulator */
#define SrcImmU16   (0xe<<5)    /* Immediate operand, unsigned, 16 bits */
#define SrcDX       (0xf<<5)	/* Source is in DX register */
#define SrcMask     (0xf<<5)
/* Generic ModRM decode. */
#define ModRM       (1<<9)
/* Destination is only written; never read. */
#define Mov         (1<<10)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22) /* Vendor specific instruction */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm     (4<<29)
#define Src2Mask    (7<<29)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

struct opcode {
	u32 flags;
	u8 intercept;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		struct opcode *group;
		struct group_dual *gdual;
		struct gprefix *gprefix;
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

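/*
 * Illustration: combined with the X-macros above, the opcode tables
 * defined further down in this file (outside this excerpt) can express
 * runs of identical entries compactly, e.g. a row such as
 *
 *	X16(D(SrcImmByte)),
 *
 * expands to sixteen identical struct opcode initializers, one per
 * opcode in a sixteen-opcode run sharing a single decode description.
 */
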
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "                                  \
	"push %"_tmp"; "                                                \
	"push %"_tmp"; "                                                \
	"movl %"_msk",%"_LO32 _tmp"; "                                  \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"pushf; "                                                       \
	"notl %"_LO32 _tmp"; "                                          \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "                                                \
	"orl  %"_LO32 _tmp",("_STK"); "                                 \
	"popf; "                                                        \
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" ((ctxt)->eflags),			\
			  "+q" (*(_dsttype*)&(ctxt)->dst.val),		\
			  "=&r" (_tmp)					\
			: _y ((ctxt)->src.val), "i" (EFLAGS_MASK));	\
	} while (0)
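
/*
 * In the asm above, the positional operands are: %0 = eflags,
 * %1 = dst.val, %2 = the caller-declared _tmp scratch register,
 * %3 = src.val and %4 = EFLAGS_MASK; hence the "0", "4", "2"
 * arguments to _PRE_EFLAGS and _POST_EFLAGS.
 */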

/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy)		\
	do {								\
		unsigned long _tmp;					\
									\
		switch ((ctxt)->dst.bytes) {				\
		case 2:							\
			____emulate_2op(ctxt,_op,_wx,_wy,"w",u16);	\
			break;						\
		case 4:							\
			____emulate_2op(ctxt,_op,_lx,_ly,"l",u32);	\
			break;						\
		case 8:							\
			ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy)		     \
	do {								     \
		unsigned long _tmp;					     \
		switch ((ctxt)->dst.bytes) {				     \
		case 1:							     \
			____emulate_2op(ctxt,_op,_bx,_by,"b",u8);	     \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(ctxt, _op,			     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(ctxt, _op)					\
	__emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(ctxt, _op)					\
	__emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(ctxt, _op)				\
	__emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")

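/*
 * In each pair passed above, the first string is the operand-width
 * template modifier (e.g. the "b" in %b3) and the second is the gcc
 * constraint for the source operand; "c" forces the count into %cl for
 * the shift/rotate forms, while "q"/"r" pick a suitable register.
 */
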
/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(ctxt, _op, _suffix, _type)		\
	do {								\
		unsigned long _tmp;					\
		_type _clv  = (ctxt)->src2.val;				\
		_type _srcv = (ctxt)->src.val;				\
		_type _dstv = (ctxt)->dst.val;				\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "2")			\
			_op _suffix " %4,%1 \n"				\
			_POST_EFLAGS("0", "5", "2")			\
			: "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
			);						\
									\
		(ctxt)->src2.val = (unsigned long) _clv;		\
		(ctxt)->src.val  = (unsigned long) _srcv;		\
		(ctxt)->dst.val  = (unsigned long) _dstv;		\
	} while (0)

#define emulate_2op_cl(ctxt, _op)					\
	do {								\
		switch ((ctxt)->dst.bytes) {				\
		case 2:							\
			__emulate_2op_cl(ctxt, _op, "w", u16);		\
			break;						\
		case 4:							\
			__emulate_2op_cl(ctxt, _op, "l", u32);		\
			break;						\
		case 8:							\
			ON64(__emulate_2op_cl(ctxt, _op, "q", ulong));	\
			break;						\
		}							\
	} while (0)

#define __emulate_1op(ctxt, _op, _suffix)				\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(ctxt, _op)						\
	do {								\
		switch ((ctxt)->dst.bytes) {				\
		case 1:	__emulate_1op(ctxt, _op, "b"); break;		\
		case 2:	__emulate_1op(ctxt, _op, "w"); break;		\
		case 4:	__emulate_1op(ctxt, _op, "l"); break;		\
		case 8:	ON64(__emulate_1op(ctxt, _op, "q")); break;	\
		}							\
	} while (0)

#define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)		\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "1")			\
			_op _suffix " %5; "				\
			_POST_EFLAGS("0", "4", "1")			\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx)			\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)

#define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "5", "1")			\
			"1: \n\t"					\
			_op _suffix " %6; "				\
			"2: \n\t"					\
			_POST_EFLAGS("0", "5", "1")			\
			".pushsection .fixup,\"ax\" \n\t"		\
			"3: movb $1, %4 \n\t"				\
			"jmp 2b \n\t"					\
			".popsection \n\t"				\
			_ASM_EXTABLE(1b, 3b)				\
			: "=m" (_eflags), "=&r" (_tmp),			\
			  "+a" (_rax), "+d" (_rdx), "+qm"(_ex)		\
			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
			  "a" (_rax), "d" (_rdx));			\
	} while (0)
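
/*
 * The _ex variant installs an exception-table fixup: if the div/idiv
 * itself faults (e.g. divide by zero), execution resumes at the .fixup
 * stub, which sets _ex to 1; the caller (see em_grp3() below) then
 * raises #DE instead of letting the fault hit the host.
 */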

/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)		\
	do {								\
		switch((_src).bytes) {					\
		case 1:							\
			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
					      _eflags, "b");		\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
					      _eflags, "w");		\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
					      _eflags, "l");		\
			break;						\
		case 8:							\
			ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
						   _eflags, "q"));	\
			break;						\
		}							\
	} while (0)

#define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex)	\
	do {								\
		switch((_src).bytes) {					\
		case 1:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx,	\
						 _eflags, "b", _ex);	\
			break;						\
		case 2:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "w", _ex);	\
			break;						\
		case 4:							\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "l", _ex);	\
			break;						\
		case 8: ON64(						\
			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
						 _eflags, "q", _ex));	\
			break;						\
		}							\
	} while (0)

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
}

static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	register_address_increment(ctxt, &ctxt->_eip, rel);
}
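
/*
 * Example: with ctxt->ad_bytes == 2 (16-bit addressing) ad_mask() is
 * 0xffff, so register_address_increment() wraps the low 16 bits of the
 * register while leaving the upper bits intact, matching (E)SI/(E)DI
 * wraparound during real-mode string instructions.
 */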
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
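
/*
 * Example: a descriptor with g=1 and limit=0xfffff scales to a limit of
 * 0xffffffff (4 GiB - 1); with g=0 the 20-bit limit is used unscaled.
 */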

static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->has_seg_override = true;
	ctxt->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->has_seg_override)
		return 0;

	return ctxt->seg_override;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static int __linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write, bool fetch,
		     ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl, rpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		break;
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment or read-only data segment */
		if (((desc.type & 8) || !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		rpl = sel & 3;
		cpl = max(cpl, rpl);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, addr.seg);
	else
		return emulate_gp(ctxt, addr.seg);
}

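/*
 * Note on the PROT64 case above: shifting 'la' left and then
 * arithmetically right by 16 sign-extends bit 47, so the comparison
 * fails exactly when bits 63:48 are not a sign extension of the
 * 48-bit address, i.e. when the address is non-canonical.
 */
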
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}


static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->fetch;
	int rc;
	int size, cur_size;

	if (ctxt->_eip == fc->end) {
		unsigned long linear;
		struct segmented_address addr = { .seg = VCPU_SREG_CS,
						  .ea  = ctxt->_eip };
		cur_size = fc->end - fc->start;
		size = min(15UL - cur_size,
			   PAGE_SIZE - offset_in_page(ctxt->_eip));
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
				      size, &ctxt->exception);
		if (unlikely(rc != X86EMUL_CONTINUE))
			return rc;
		fc->end += size;
	}
	*dest = fc->data[ctxt->_eip - fc->start];
	ctxt->_eip++;
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
		return X86EMUL_UNHANDLEABLE;
	while (size--) {
		rc = do_insn_fetch_byte(ctxt, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	unsigned long _x;						\
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_type)_x;							\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({	rc = do_insn_fetch(_ctxt, _arr, (_size));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
})

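/*
 * insn_fetch() is a statement expression: it relies on an 'rc' variable
 * and a 'done' label in the enclosing function, e.g.
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 *
 * bails out through 'done' when the fetch fails.
 */
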
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}

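/*
 * Example: with highbyte_regs set, modrm_reg == 4 does not select RSP;
 * it maps to byte 1 of regs[0] (RAX), i.e. AH. Likewise 5..7 map to
 * CH, DH and BH.
 */
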
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
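
/*
 * Example: condition code 0x4 ('e'/'z') selects case 2 above and
 * returns nonzero iff ZF is set; 0x5 ('ne'/'nz') is the same test with
 * the low bit set, so the result is inverted.
 */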

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op,
				    int inhibit_bytereg)
{
	unsigned reg = ctxt->modrm_reg;
	int highbyte_regs = ctxt->rex_prefix == 0;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}

	op->type = OP_REG;
	if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
		op->bytes = 1;
	} else {
		op->addr.reg = decode_register(reg, ctxt->regs, 0);
		op->bytes = ctxt->op_bytes;
	}
	fetch_register_operand(op);
	op->orig_val = op->val;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	if (ctxt->rex_prefix) {
		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
	}

	ctxt->modrm = insn_fetch(u8, ctxt);
	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt->modrm_rm,
					       ctxt->regs, ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
		unsigned si = ctxt->regs[VCPU_REGS_RSI];
		unsigned di = ctxt->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else
				modrm_ea += ctxt->regs[base_reg];
			if (index_reg != 4)
				modrm_ea += ctxt->regs[index_reg] << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else
			modrm_ea += ctxt->regs[ctxt->modrm_rm];
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
done:
	return rc;
}

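/*
 * Example of the 16-bit table above: modrm 0x42 (mod=1, reg=0, rm=2)
 * decodes to [bp + si + disp8], and because rm == 2 the default
 * segment is SS rather than DS.
 */
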
static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~(ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

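/*
 * Example: for a 16-bit 'bt [mem], reg' with a source value of 35, the
 * code above adds 4 to the memory address (35 & ~15 = 32 bits = 4
 * bytes) and leaves bit offset 3 in src.val, since 35 = 2*16 + 3.
 */
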
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	while (size) {
		int n = min(size, 8u);
		size -= n;
		if (mc->pos < mc->end)
			goto read_cached;

		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
					      &ctxt->exception);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		mc->end += n;

	read_cached:
		memcpy(dest, mc->data + mc->pos, n);
		mc->pos += n;
		dest += n;
		addr += n;
	}
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	memcpy(dest, rc->data + rc->pos, size);
	rc->pos += size;
	return 1;
}
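
/*
 * The read-ahead above batches a rep-prefixed IN: up to RCX elements
 * are fetched from the host in one call (bounded by the cache size and
 * by the page containing RDI), and later iterations are served from
 * rc->data without another exit.
 */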

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	struct x86_emulate_ops *ops = ctxt->ops;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc);
	} else
		ops->get_gdt(ctxt, dt);
}
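
/*
 * Bit 2 of a selector is the table indicator checked above: set means
 * the selector indexes the LDT, clear means the GDT.
 */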

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl, cpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load a system descriptor into an ordinary segment register */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;
	cpl = ctxt->ops->cpl(ctxt);

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

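/*
 * Reminder on the descriptor type bits tested above: bit 3 (0x8)
 * distinguishes code from data, bit 1 (0x2) is write-enable for data
 * and read-enable for code, and bit 2 (0x4) marks expand-down data or
 * conforming code.
 */
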
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	switch (ctxt->dst.type) {
	case OP_REG:
		write_register_operand(&ctxt->dst);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			rc = segmented_cmpxchg(ctxt,
					       ctxt->dst.addr.mem,
					       &ctxt->dst.orig_val,
					       &ctxt->dst.val,
					       ctxt->dst.bytes);
		else
			rc = segmented_write(ctxt,
					     ctxt->dst.addr.mem,
					     &ctxt->dst.val,
					     ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
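
/*
 * Note: a LOCK-prefixed instruction is written back with cmpxchg
 * against dst.orig_val (the value read at decode time), so the update
 * is atomic with respect to other vcpus.
 */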

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	struct segmented_address addr;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
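
/*
 * POPF privilege summary, as implemented above: in protected mode only
 * CPL 0 may change IOPL, and IF changes only when CPL <= IOPL; in vm86
 * mode POPF faults with #GP unless IOPL == 3; in real mode everything
 * in change_mask is writable.
 */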

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
							ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}
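
/*
 * Real-mode IVT layout assumed above: each vector is a 4-byte entry at
 * linear address irq * 4, holding the 16-bit offset in bytes 0-1 and
 * the 16-bit CS selector in bytes 2-3.
 */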

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_grp1a(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
}

static int em_grp2(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB(ctxt, "rol");
		break;
	case 1:	/* ror */
		emulate_2op_SrcB(ctxt, "ror");
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB(ctxt, "rcl");
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB(ctxt, "rcr");
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB(ctxt, "sal");
		break;
	case 5:	/* shr */
		emulate_2op_SrcB(ctxt, "shr");
		break;
	case 7:	/* sar */
		emulate_2op_SrcB(ctxt, "sar");
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_grp3(struct x86_emulate_ctxt *ctxt)
{
	unsigned long *rax = &ctxt->regs[VCPU_REGS_RAX];
	unsigned long *rdx = &ctxt->regs[VCPU_REGS_RDX];
	u8 de = 0;

	switch (ctxt->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV(ctxt, "test");
		break;
	case 2:	/* not */
		ctxt->dst.val = ~ctxt->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op(ctxt, "neg");
		break;
	case 4: /* mul */
		emulate_1op_rax_rdx("mul", ctxt->src, *rax, *rdx, ctxt->eflags);
		break;
	case 5: /* imul */
		emulate_1op_rax_rdx("imul", ctxt->src, *rax, *rdx, ctxt->eflags);
		break;
	case 6: /* div */
		emulate_1op_rax_rdx_ex("div", ctxt->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	case 7: /* idiv */
		emulate_1op_rax_rdx_ex("idiv", ctxt->src, *rax, *rdx,
				       ctxt->eflags, de);
		break;
	default:
		return X86EMUL_UNHANDLEABLE;
	}
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 0:	/* inc */
		emulate_1op(ctxt, "inc");
		break;
	case 1:	/* dec */
		emulate_1op(ctxt, "dec");
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		ctxt->_eip = ctxt->src.val;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		ctxt->_eip = ctxt->src.val;
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}

static int em_grp9(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
			(u32) ctxt->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
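
/*
 * This is the cmpxchg8b path: if EDX:EAX matches the old 64-bit value,
 * ZF is set and ECX:EBX is written back through the normal writeback
 * path; otherwise the old value is loaded into EDX:EAX and ZF cleared.
 */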

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	return em_pop(ctxt);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->op_bytes == 4)
		ctxt->_eip = (u32)ctxt->_eip;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
{
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

1832
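/*
 * Build the flat CS/SS descriptors that syscall/sysenter/sysexit
 * install: base 0, 4GB limit, ring 0.  The callers adjust cs.l/cs.d
 * and the DPLs for long mode and for the user-mode (sysexit) case.
 */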
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	u16 selector;

	memset(cs, 0, sizeof(struct desc_struct));
	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}

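/*
 * SYSCALL: the CS/SS selectors come from STAR[47:32]; the target RIP
 * comes from LSTAR (64-bit mode) or CSTAR (compat mode), with RFLAGS
 * masked by SYSCALL_MASK.  Legacy (non-LMA) mode takes EIP from
 * STAR[31:0].
 */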
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

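/*
 * SYSENTER: CS comes from SYSENTER_CS, SS is CS + 8, and EIP/ESP are
 * taken from SYSENTER_EIP/SYSENTER_ESP.  A null SYSENTER_CS yields #GP.
 */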
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * XXX sysenter/sysexit have not been tested in 64-bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	ctxt->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

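/*
 * SYSEXIT: return to user mode.  The user CS/SS selectors are derived
 * from SYSENTER_CS (+16/+24 for the 32-bit case, +32/+40 for 64-bit)
 * and the return EIP/ESP are passed in EDX/ECX.
 */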
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
	ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}

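/*
 * IOPL check: in VM86 mode I/O is always trapped; in protected mode
 * direct port access is allowed only when CPL <= IOPL.
 */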
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

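/*
 * When the IOPL check fails, consult the I/O permission bitmap in the
 * TSS: a port may be accessed only if every bit covering the access is
 * clear.  The bitmap offset lives at byte 102 of the TSS.
 */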
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

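/*
 * Task switch helpers: save the current register and segment state
 * into the outgoing TSS image, then load the incoming one.
 */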
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = ctxt->regs[VCPU_REGS_RAX];
	tss->cx = ctxt->regs[VCPU_REGS_RCX];
	tss->dx = ctxt->regs[VCPU_REGS_RDX];
	tss->bx = ctxt->regs[VCPU_REGS_RBX];
	tss->sp = ctxt->regs[VCPU_REGS_RSP];
	tss->bp = ctxt->regs[VCPU_REGS_RBP];
	tss->si = ctxt->regs[VCPU_REGS_RSI];
	tss->di = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
	ctxt->regs[VCPU_REGS_RSI] = tss->si;
	ctxt->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = ctxt->regs[VCPU_REGS_RAX];
	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
	tss->edx = ctxt->regs[VCPU_REGS_RDX];
	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
	tss->esp = ctxt->regs[VCPU_REGS_RSP];
	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
	tss->esi = ctxt->regs[VCPU_REGS_RSI];
	tss->edi = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;
	ctxt->regs[VCPU_REGS_RAX] = tss->eax;
	ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
	ctxt->regs[VCPU_REGS_RDX] = tss->edx;
	ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
	ctxt->regs[VCPU_REGS_RSP] = tss->esp;
	ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
	ctxt->regs[VCPU_REGS_RSI] = tss->esi;
	ctxt->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

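/*
 * Common task-switch logic: validate the new TSS descriptor, update
 * the busy bits and the NT flag according to the switch reason, hand
 * off to the 16/32-bit helpers, and push any error code on the new
 * task's stack.
 */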
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int reason,
				   bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	if (reason != TASK_SWITCH_IRET) {
		if ((tss_selector & 3) > next_tss_desc.dpl ||
		    ops->cpl(ctxt) > next_tss_desc.dpl)
			return emulate_gp(ctxt, 0);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags;
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE)
		ctxt->eip = ctxt->_eip;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
	op->addr.mem.seg = seg;
}

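/*
 * DAS: decimal-adjust AL after subtraction.  Each BCD nibble is
 * corrected (-6/-0x60) based on AF/CF, then PF/ZF/SF are recomputed
 * by OR-ing AL with zero.
 */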
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	emulate_2op_SrcV(ctxt, "or");
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

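/*
 * CALL far: load the new CS:IP from the operand, then push the old
 * CS and IP as the return address.
 */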
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	old_eip = ctxt->_eip;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = old_eip;
	return em_push(ctxt);
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_add(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "add");
	return X86EMUL_CONTINUE;
}

static int em_or(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "or");
	return X86EMUL_CONTINUE;
}

static int em_adc(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "adc");
	return X86EMUL_CONTINUE;
}

static int em_sbb(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "sbb");
	return X86EMUL_CONTINUE;
}

static int em_and(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "and");
	return X86EMUL_CONTINUE;
}

static int em_sub(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "sub");
	return X86EMUL_CONTINUE;
}

static int em_xor(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "xor");
	return X86EMUL_CONTINUE;
}

static int em_cmp(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "cmp");
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_test(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "test");
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "imul");
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return em_imul(ctxt);
}

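/*
 * CWD/CDQ/CQO: replicate the sign bit of rAX across rDX by
 * subtracting one from the shifted-down sign bit and inverting.
 */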
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
	ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_movdqu(struct x86_emulate_ctxt *ctxt)
{
	memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_gdt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

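/*
 * Decode-time permission checks for privileged operations; these are
 * wired into the opcode tables below via .check_perm and run before
 * the instruction executes, raising #UD/#GP/#DB as appropriate.
 */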
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
			rsvd = CR3_PAE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
			rsvd = CR3_NONPAE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		u64 cr4;

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = ctxt->regs[VCPU_REGS_RAX];

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = ctxt->regs[VCPU_REGS_RCX];

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    (rcx > 3))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

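/*
 * Shorthand constructors for the opcode tables below: D() is a plain
 * decode entry, I() adds an execution callback, DI/II attach an
 * intercept, the *IP variants add a permission check, G/GD/GP select
 * group, group-dual and mandatory-prefix tables, and the 2bv/I6ALU
 * helpers expand byte/word pairs and the standard ALU encodings.
 */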
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
		      .check_perm = (_p) }
#define N    D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
	  .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)

#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static struct opcode group7_rm1[] = {
	DI(SrcNone | ModRM | Priv, monitor),
	DI(SrcNone | ModRM | Priv, mwait),
	N, N, N, N, N, N,
};

static struct opcode group7_rm3[] = {
	DIP(SrcNone | ModRM | Prot | Priv, vmrun,   check_svme_pa),
	II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
	DIP(SrcNone | ModRM | Prot | Priv, vmload,  check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, vmsave,  check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, stgi,    check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, clgi,    check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, skinit,  check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
};

static struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static struct opcode group1[] = {
	I(Lock, em_add),
	I(Lock, em_or),
	I(Lock, em_adc),
	I(Lock, em_sbb),
	I(Lock, em_and),
	I(Lock, em_sub),
	I(Lock, em_xor),
	I(0, em_cmp),
};

static struct opcode group1A[] = {
	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	X4(D(SrcMem | ModRM)),
};

static struct opcode group4[] = {
	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
	D(SrcMem | ModRM | Stack),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
	D(SrcMem | ModRM | Stack), N,
};

static struct opcode group6[] = {
	DI(ModRM | Prot,        sldt),
	DI(ModRM | Prot,        str),
	DI(ModRM | Prot | Priv, lldt),
	DI(ModRM | Prot | Priv, ltr),
	N, N, N, N,
};

static struct group_dual group7 = { {
	DI(ModRM | Mov | DstMem | Priv, sgdt),
	DI(ModRM | Mov | DstMem | Priv, sidt),
	II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
	II(ModRM | SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
} };

static struct opcode group8[] = {
	N, N, N, N,
	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
};

static struct group_dual group9 = { {
	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static struct opcode group11[] = {
	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
};

static struct gprefix pfx_0f_6f_0f_7f = {
	N, N, N, I(Sse, em_movdqu),
};

static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	I6ALU(Lock, em_add),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x08 - 0x0F */
	I6ALU(Lock, em_or),
	D(ImplicitOps | Stack | No64), N,
	/* 0x10 - 0x17 */
	I6ALU(Lock, em_adc),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x18 - 0x1F */
	I6ALU(Lock, em_sbb),
	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
	/* 0x20 - 0x27 */
	I6ALU(Lock, em_and), N, N,
	/* 0x28 - 0x2F */
	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	I6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	I6ALU(0, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
	D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	I2bv(DstMem | SrcReg | ModRM, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf), N, N,
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstDI | String, em_cmp),
	/* 0xA8 - 0xAF */
	I2bv(DstAcc | SrcImm, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	I2bv(SrcAcc | DstDI | String, em_cmp),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | Stack, em_ret),
	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	N, N, N, I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte, em_loop)),
	I(SrcImmByte, em_jcxz),
	D2bvIP(SrcImmUByte | DstAcc, in,  check_perm_in),
	D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
	D2bvIP(SrcDX | DstAcc, in,  check_perm_in),
	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | VendorSpecific, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
	DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
	DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	DI(ImplicitOps | Priv, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	DI(ImplicitOps | Priv, rdmsr),
	DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
	I(ImplicitOps | VendorSpecific, em_sysenter),
	I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
	DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I6ALU

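/*
 * Immediates are at most four bytes; with a 64-bit operand size a
 * 32-bit immediate is fetched and sign-extended.
 */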
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

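/*
 * Decode one instruction: consume legacy and REX prefixes, look the
 * opcode up in the tables above (following group/group-dual/prefix
 * indirections), then decode the ModRM/SIB bytes and the source,
 * second-source and destination operands.
 */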
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3341 3342 3343
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
3344
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3345
	bool op_prefix = false;
3346
	struct opcode opcode;
3347
	struct operand memop = { .type = OP_NONE }, *memopp = NULL;
3348

3349 3350 3351
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.start = ctxt->_eip;
	ctxt->fetch.end = ctxt->fetch.start + insn_len;
3352
	if (insn_len > 0)
3353
		memcpy(ctxt->fetch.data, insn, insn_len);
3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
3371
		return EMULATION_FAILED;
3372 3373
	}

3374 3375
	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;
3376 3377 3378

	/* Legacy prefixes. */
	for (;;) {
3379
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
3380
		case 0x66:	/* operand-size override */
3381
			op_prefix = true;
3382
			/* switch between 2/4 bytes */
3383
			ctxt->op_bytes = def_op_bytes ^ 6;
3384 3385 3386 3387
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
3388
				ctxt->ad_bytes = def_ad_bytes ^ 12;
3389 3390
			else
				/* switch between 2/4 bytes */
3391
				ctxt->ad_bytes = def_ad_bytes ^ 6;
3392 3393 3394 3395 3396
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
3397
			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
3398 3399 3400
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
3401
			set_seg_override(ctxt, ctxt->b & 7);
3402 3403 3404 3405
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
3406
			ctxt->rex_prefix = ctxt->b;
3407 3408
			continue;
		case 0xf0:	/* LOCK */
3409
			ctxt->lock_prefix = 1;
3410 3411 3412
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
3413
			ctxt->rep_prefix = ctxt->b;
3414 3415 3416 3417 3418 3419 3420
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

3421
		ctxt->rex_prefix = 0;
3422 3423 3424 3425 3426
	}

done_prefixes:

	/* REX prefix. */
3427 3428
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
3429 3430

	/* Opcode byte(s). */
3431
	opcode = opcode_table[ctxt->b];
3432
	/* Two-byte opcode? */
3433 3434
	if (ctxt->b == 0x0f) {
		ctxt->twobyte = 1;
3435
		ctxt->b = insn_fetch(u8, ctxt);
3436
		opcode = twobyte_table[ctxt->b];
3437
	}
3438
	ctxt->d = opcode.flags;
3439

3440 3441
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
3442
		case Group:
3443
			ctxt->modrm = insn_fetch(u8, ctxt);
3444 3445
			--ctxt->_eip;
			goffset = (ctxt->modrm >> 3) & 7;
3446 3447 3448
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
3449
			ctxt->modrm = insn_fetch(u8, ctxt);
3450 3451 3452
			--ctxt->_eip;
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
3453 3454 3455 3456 3457
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
3458
			goffset = ctxt->modrm & 7;
3459
			opcode = opcode.u.group[goffset];
3460 3461
			break;
		case Prefix:
3462
			if (ctxt->rep_prefix && op_prefix)
3463
				return EMULATION_FAILED;
3464
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
3465 3466 3467 3468 3469 3470 3471 3472
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		default:
3473
			return EMULATION_FAILED;
3474
		}
3475

3476 3477
		ctxt->d &= ~GroupMask;
		ctxt->d |= opcode.flags;
3478 3479
	}

3480 3481 3482
	ctxt->execute = opcode.u.execute;
	ctxt->check_perm = opcode.check_perm;
	ctxt->intercept = opcode.intercept;
3483 3484

	/* Unrecognised? */
3485
	if (ctxt->d == 0 || (ctxt->d & Undefined))
3486
		return EMULATION_FAILED;
3487

3488
	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3489
		return EMULATION_FAILED;
3490

3491 3492
	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
		ctxt->op_bytes = 8;
3493

3494
	if (ctxt->d & Op3264) {
3495
		if (mode == X86EMUL_MODE_PROT64)
3496
			ctxt->op_bytes = 8;
3497
		else
3498
			ctxt->op_bytes = 4;
3499 3500
	}

3501 3502
	if (ctxt->d & Sse)
		ctxt->op_bytes = 16;
A
Avi Kivity 已提交
3503

3504
	/* ModRM and SIB bytes. */
3505
	if (ctxt->d & ModRM) {
3506
		rc = decode_modrm(ctxt, &memop);
3507 3508 3509
		if (!ctxt->has_seg_override)
			set_seg_override(ctxt, ctxt->modrm_seg);
	} else if (ctxt->d & MemAbs)
3510
		rc = decode_abs(ctxt, &memop);
3511 3512 3513
	if (rc != X86EMUL_CONTINUE)
		goto done;

3514 3515
	if (!ctxt->has_seg_override)
		set_seg_override(ctxt, VCPU_SREG_DS);
3516

3517
	memop.addr.mem.seg = seg_override(ctxt);
3518

3519
	if (memop.type == OP_MEM && ctxt->ad_bytes != 8)
3520
		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
3521 3522 3523 3524 3525

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
3526
	switch (ctxt->d & SrcMask) {
3527 3528 3529
	case SrcNone:
		break;
	case SrcReg:
3530
		decode_register_operand(ctxt, &ctxt->src, 0);
3531 3532
		break;
	case SrcMem16:
3533
		memop.bytes = 2;
3534 3535
		goto srcmem_common;
	case SrcMem32:
3536
		memop.bytes = 4;
3537 3538
		goto srcmem_common;
	case SrcMem:
3539 3540
		memop.bytes = (ctxt->d & ByteOp) ? 1 :
							   ctxt->op_bytes;
3541
	srcmem_common:
3542 3543
		ctxt->src = memop;
		memopp = &ctxt->src;
3544
		break;
3545
	case SrcImmU16:
3546
		rc = decode_imm(ctxt, &ctxt->src, 2, false);
3547
		break;
3548
	case SrcImm:
3549
		rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), true);
3550
		break;
3551
	case SrcImmU:
3552
		rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), false);
3553 3554
		break;
	case SrcImmByte:
3555
		rc = decode_imm(ctxt, &ctxt->src, 1, true);
3556
		break;
3557
	case SrcImmUByte:
3558
		rc = decode_imm(ctxt, &ctxt->src, 1, false);
3559 3560
		break;
	case SrcAcc:
3561 3562 3563 3564
		ctxt->src.type = OP_REG;
		ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
		fetch_register_operand(&ctxt->src);
3565 3566
		break;
	case SrcOne:
3567 3568
		ctxt->src.bytes = 1;
		ctxt->src.val = 1;
3569 3570
		break;
	case SrcSI:
3571 3572 3573 3574 3575 3576
		ctxt->src.type = OP_MEM;
		ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		ctxt->src.addr.mem.ea =
			register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
		ctxt->src.addr.mem.seg = seg_override(ctxt);
		ctxt->src.val = 0;
3577 3578
		break;
	case SrcImmFAddr:
3579 3580 3581
		ctxt->src.type = OP_IMM;
		ctxt->src.addr.mem.ea = ctxt->_eip;
		ctxt->src.bytes = ctxt->op_bytes + 2;
3582
		insn_fetch_arr(ctxt->src.valptr, ctxt->src.bytes, ctxt);
3583 3584
		break;
	case SrcMemFAddr:
3585
		memop.bytes = ctxt->op_bytes + 2;
3586
		goto srcmem_common;
3587
		break;
3588
	case SrcDX:
3589 3590 3591 3592
		ctxt->src.type = OP_REG;
		ctxt->src.bytes = 2;
		ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
		fetch_register_operand(&ctxt->src);
3593
		break;
3594 3595
	}

3596 3597 3598
	if (rc != X86EMUL_CONTINUE)
		goto done;

3599 3600 3601 3602
	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
3603
	switch (ctxt->d & Src2Mask) {
3604 3605 3606
	case Src2None:
		break;
	case Src2CL:
3607
		ctxt->src2.bytes = 1;
3608
		ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
3609 3610
		break;
	case Src2ImmByte:
3611
		rc = decode_imm(ctxt, &ctxt->src2, 1, true);
3612 3613
		break;
	case Src2One:
3614 3615
		ctxt->src2.bytes = 1;
		ctxt->src2.val = 1;
3616
		break;
3617
	case Src2Imm:
3618
		rc = decode_imm(ctxt, &ctxt->src2, imm_size(ctxt), true);
3619
		break;
3620 3621
	}

3622 3623 3624
	if (rc != X86EMUL_CONTINUE)
		goto done;

3625
	/* Decode and fetch the destination operand: register or memory. */
3626
	switch (ctxt->d & DstMask) {
3627
	case DstReg:
3628 3629
		decode_register_operand(ctxt, &ctxt->dst,
			 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
		break;
	case DstImmUByte:
		ctxt->dst.type = OP_IMM;
		ctxt->dst.addr.mem.ea = ctxt->_eip;
		ctxt->dst.bytes = 1;
		ctxt->dst.val = insn_fetch(u8, ctxt);
		break;
	case DstMem:
	case DstMem64:
		ctxt->dst = memop;
		memopp = &ctxt->dst;
		if ((ctxt->d & DstMask) == DstMem64)
			ctxt->dst.bytes = 8;
		else
			ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		ctxt->dst.orig_val = ctxt->dst.val;
		break;
	case DstAcc:
		ctxt->dst.type = OP_REG;
		ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
		fetch_register_operand(&ctxt->dst);
		ctxt->dst.orig_val = ctxt->dst.val;
		break;
	case DstDI:
		ctxt->dst.type = OP_MEM;
		ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		ctxt->dst.addr.mem.ea =
			register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
		ctxt->dst.addr.mem.seg = VCPU_SREG_ES;
		ctxt->dst.val = 0;
		break;
	case DstDX:
		ctxt->dst.type = OP_REG;
		ctxt->dst.bytes = 2;
		ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
		fetch_register_operand(&ctxt->dst);
		break;
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
	default:
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
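	/*
	 * RIP-relative operands are relative to the end of the instruction,
	 * so the effective address can only be fixed up here, after ->_eip
	 * has advanced past the entire instruction.
	 */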
	if (memopp && memopp->type == OP_MEM && ctxt->rip_relative)
		memopp->addr.mem.ea += ctxt->_eip;

	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

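/*
 * Returns true when a REP-prefixed string instruction has terminated.
 * The first termination condition (RCX counting down to zero) is
 * checked by the caller; only the ZF-based REPE/REPNE conditions are
 * evaluated here.
 */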
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition applies only to REPE/REPZ and
	 * REPNE/REPNZ.  If one of those prefixes is present, test the
	 * corresponding condition:
	 *	- REPE/REPZ:  done when ZF = 0
	 *	- REPNE/REPNZ: done when ZF = 1
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u64 msr_data;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/*
	 * The LOCK prefix is allowed only with certain instructions, and
	 * only when the destination operand is in memory.
	 */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & Sse)
	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

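	/*
	 * For a nested guest, instruction intercepts are checked at three
	 * points, mirroring the hardware ordering: before the exception
	 * checks, after them, and again after the memory operands have
	 * been fetched.
	 */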
	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Privileged instructions can be executed only at CPL 0 */
	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	/* Instruction can only be executed in protected mode */
	if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Do instruction-specific permission checks */
	if (ctxt->check_perm) {
		rc = ctxt->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->eip = ctxt->_eip;
			goto done;
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->execute) {
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->twobyte)
		goto twobyte_insn;

	switch (ctxt->b) {
	case 0x06:		/* push es */
		rc = emulate_push_sreg(ctxt, VCPU_SREG_ES);
		break;
	case 0x07:		/* pop es */
		rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES);
		break;
	case 0x0e:		/* push cs */
		rc = emulate_push_sreg(ctxt, VCPU_SREG_CS);
		break;
	case 0x16:		/* push ss */
		rc = emulate_push_sreg(ctxt, VCPU_SREG_SS);
		break;
	case 0x17:		/* pop ss */
		rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS);
		break;
	case 0x1e:		/* push ds */
		rc = emulate_push_sreg(ctxt, VCPU_SREG_DS);
		break;
	case 0x1f:		/* pop ds */
		rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS);
		break;
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op(ctxt, "inc");
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op(ctxt, "dec");
		break;
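	/*
	 * Opcode 0x63 is movsxd only in 64-bit mode; elsewhere it decodes
	 * as arpl, which this emulator does not handle.
	 */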
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
		goto do_io_in;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
		goto do_io_out;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = em_grp1a(ctxt);
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
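		/* Exchanging RAX with itself is a nop; skip the xchg. */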
		if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
			break;
		rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
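		/* Sign-extend the lower half of the accumulator in place. */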
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xc0 ... 0xc1:
		rc = em_grp2(ctxt);
		break;
	case 0xc4:		/* les */
		rc = emulate_load_segment(ctxt, VCPU_SREG_ES);
		break;
	case 0xc5:		/* lds */
		rc = emulate_load_segment(ctxt, VCPU_SREG_DS);
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		rc = em_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
		rc = em_grp2(ctxt);
		break;
	case 0xe4:	/* inb */
	case 0xe5:	/* in */
		goto do_io_in;
	case 0xe6:	/* outb */
	case 0xe7:	/* out */
		goto do_io_out;
	case 0xe8: /* call (near) */ {
		long int rel = ctxt->src.val;
		ctxt->src.val = (unsigned long) ctxt->_eip;
		jmp_rel(ctxt, rel);
		rc = em_push(ctxt);
		break;
	}
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
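	/*
	 * A zero return from pio_in_emulated() means the data is not
	 * available yet; bail out with ->eip unchanged so the instruction
	 * is restarted once the I/O completes.
	 */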
	do_io_in:
		if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
				     &ctxt->dst.val))
			goto done; /* IO is needed */
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
	do_io_out:
		ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				      &ctxt->src.val, 1);
		ctxt->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		rc = em_grp3(ctxt);
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	case 0xfe: /* Grp4 */
		rc = em_grp45(ctxt);
		break;
	case 0xff: /* Grp5 */
		rc = em_grp45(ctxt);
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	rc = writeback(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Restore the dst type in case the decoding is reused
	 * (this happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

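	/* Advance RSI/RDI by the operand size, honouring EFLAGS.DF. */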
	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override(ctxt),
				VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
				&ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		struct read_cache *r = &ctxt->io_read;
		register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x22: /* mov reg, cr */
		if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		ctxt->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
				((ctxt->mode == X86EMUL_MODE_PROT64) ?
				 ~0ULL : ~0U)) < 0) {
			/* #UD condition is already handled by the code above */
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}

		ctxt->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
			| ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
		if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x32:
		/* rdmsr */
		if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
			emulate_gp(ctxt, 0);
			rc = X86EMUL_PROPAGATE_FAULT;
			goto done;
		} else {
			ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
			ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		break;
	case 0x40 ... 0x4f:	/* cmov */
		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
		if (!test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jcc rel (near) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xa0:	  /* push fs */
		rc = emulate_push_sreg(ctxt, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS);
		break;
	case 0xa3:
	      bt:		/* bt */
		ctxt->dst.type = OP_NONE;
		/* only subword offset */
		ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte(ctxt, "bt");
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl(ctxt, "shld");
		break;
	case 0xa8:	/* push gs */
		rc = emulate_push_sreg(ctxt, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS);
		break;
	case 0xab:
	      bts:		/* bts */
		emulate_2op_SrcV_nobyte(ctxt, "bts");
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl(ctxt, "shrd");
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		ctxt->src.orig_val = ctxt->src.val;
		ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV(ctxt, "cmp");
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			ctxt->dst.val = ctxt->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			ctxt->dst.type = OP_REG;
			ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb2:		/* lss */
		rc = emulate_load_segment(ctxt, VCPU_SREG_SS);
		break;
	case 0xb3:
	      btr:		/* btr */
		emulate_2op_SrcV_nobyte(ctxt, "btr");
		break;
	case 0xb4:		/* lfs */
		rc = emulate_load_segment(ctxt, VCPU_SREG_FS);
		break;
	case 0xb5:		/* lgs */
		rc = emulate_load_segment(ctxt, VCPU_SREG_GS);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (ctxt->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		emulate_2op_SrcV_nobyte(ctxt, "btc");
		break;
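	/*
	 * bsf/bsr leave the destination undefined and set ZF when the
	 * source is zero, so writeback is suppressed in that case.
	 */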
	case 0xbc: {		/* bsf */
		u8 zf;
		__asm__ ("bsf %2, %0; setz %1"
			 : "=r"(ctxt->dst.val), "=q"(zf)
			 : "r"(ctxt->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			ctxt->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbd: {		/* bsr */
		u8 zf;
		__asm__ ("bsr %2, %0; setz %1"
			 : "=r"(ctxt->dst.val), "=q"(zf)
			 : "r"(ctxt->src.val));
		ctxt->eflags &= ~X86_EFLAGS_ZF;
		if (zf) {
			ctxt->eflags |= X86_EFLAGS_ZF;
			ctxt->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	}
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV(ctxt, "add");
		/* Write back the register source. */
		ctxt->src.val = ctxt->dst.orig_val;
		write_register_operand(&ctxt->src);
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
							(u64) ctxt->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = em_grp9(ctxt);
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}