/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
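
/*
 * Illustrative note (a sketch, not from the original source): each
 * operand slot is recovered from the packed flags by a shift and mask,
 * e.g.
 *
 *	unsigned int src = (ctxt->d >> SrcShift) & OpMask;
 *	unsigned int dst = (ctxt->d >> DstShift) & OpMask;
 *
 * so a table entry like DstReg | SrcMem packs OpReg and OpMem into a
 * single 64-bit descriptor.
 */
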
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
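
/*
 * Illustrative note (not from the original source): the X* macros
 * repeat an initializer, so X16(x) expands to sixteen comma-separated
 * copies of x and fills a full 16-opcode row of the decode tables in
 * one line.
 */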

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;
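
/*
 * Illustrative sketch (an assumption based on the convention above, not
 * a verbatim excerpt): since every size variant is exactly FASTOP_SIZE
 * bytes, a caller can select the variant for an operand size by offset
 * arithmetic on the em_##op entry point, roughly
 *
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * giving +0 for byte, +8 for word, +16 for dword and +24 for qword ops.
 */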

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
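
/*
 * Illustrative note (a sketch, not from the original file): a typical
 * two-operand emulation reads sources through the cache and marks the
 * destination dirty, e.g. for an "add %rbx, %rax"-style operation:
 *
 *	ulong src = reg_read(ctxt, VCPU_REGS_RBX);
 *	ulong *dst = reg_rmw(ctxt, VCPU_REGS_RAX);
 *	*dst += src;
 *
 * so writeback_registers() later flushes only the dirty registers.
 */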

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_FUNC("em_" #op)

#define FOP_END \
	    ".popsection")

#define FOPNOP() \
	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
	FOP_RET

#define FOP1E(op,  dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
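
/*
 * Illustrative note (not from the original source): FASTOP2(add), used
 * further down, emits the em_add entry point followed by the size
 * variants "add %dl,%al", "add %dx,%ax", "add %edx,%eax" and, on
 * 64-bit, "add %rdx,%rax", each padded to FASTOP_SIZE bytes.
 */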

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	FOP_RET

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
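
/*
 * Worked example (illustrative): a 16-byte MOVDQA operand is decoded
 * with the Aligned flag, so a linear address such as 0x1008 fails the
 * (la & (size - 1)) check in __linearize() below and raises #GP(0).
 */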

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}
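
/*
 * Illustrative usage (a sketch only): reading a word at DS:SI during
 * string emulation amounts to
 *
 *	struct segmented_address addr = {
 *		.ea  = reg_read(ctxt, VCPU_REGS_RSI),
 *		.seg = VCPU_SREG_DS,
 *	};
 *	rc = segmented_read(ctxt, addr, &val, 2);
 *
 * which linearizes the address and goes through the memory-read cache.
 */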

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages, and the first
	 * chunk was already loaded at the beginning of x86_decode_insn.
	 * So, if there still are not enough bytes, we must have hit the
	 * 15-byte instruction-length limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
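
/*
 * Illustrative usage (a sketch; the decoder later in this file follows
 * the same pattern): pulling the ModRM byte and a 32-bit displacement
 * out of the instruction stream looks like
 *
 *	ctxt->modrm = insn_fetch(u8, ctxt);
 *	modrm_ea += insn_fetch(s32, ctxt);
 *
 * where each fetch advances _eip and jumps to "done" on failure.
 */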

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop tells whether the register is a byte register; AH,CH,DH,BH are
 * decoded for regs 4..7 only when there is no REX prefix.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not write back, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
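
/*
 * Illustrative note (not from the original source): the SETcc stubs are
 * spaced 4 bytes apart, so test_cc() reaches the right one purely by
 * address arithmetic; e.g. test_cc(0x4, flags) executes "setz %al" and
 * reports ZF, matching condition code 4 in the Jcc/SETcc/CMOVcc
 * encodings.
 */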

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}
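
/*
 * Worked example (illustrative): for "add %ecx, 0x10(%rbx,%rsi,4)" the
 * ModRM byte gives mod=1 and rm=4, so a SIB byte follows with base=rbx,
 * index=rsi and scale=2, and decode_modrm() computes
 *
 *	modrm_ea = RBX + (RSI << 2) + 0x10;
 *
 * leaving the result in op->addr.mem.ea with the default DS segment.
 */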

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
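
/*
 * Worked example (illustrative): for "bts %eax, (%rdi)" with EAX = 35,
 * mask is ~31 so sv becomes 32; the effective address is advanced by
 * sv >> 3 = 4 bytes and the bit offset is reduced to 35 & 31 = 3.
 */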

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed just for 8-byte segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					     ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}
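
/*
 * Illustrative note (a sketch): for a LOCK-prefixed read-modify-write
 * such as "lock add %eax, (mem)", the OP_MEM case above writes back via
 * segmented_cmpxchg() against op->orig_val (the value read during
 * decode), so the update is applied atomically rather than as a plain
 * store.
 */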

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

1796 1797
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

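/*
 * POPF may not modify every flag.  Which bits actually change depends
 * on CPL and IOPL; the rough rule implemented below for protected mode:
 *
 *	CPL 0:			change_mask plus IOPL and IF
 *	0 < CPL <= IOPL:	change_mask plus IF
 *	CPL > IOPL:		change_mask only
 *
 * In VM86 mode IOPL < 3 faults with #GP; real mode can change both
 * IOPL and IF.
 */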
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

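/*
 * ENTER with a non-zero nesting level would have to copy frame pointers
 * from the old frame; only level 0 is emulated.  For level 0 the
 * sequence is simply: push rBP; rBP = rSP; rSP -= frame_size.
 */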
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp :
				reg_read(ctxt, reg);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

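/*
 * Real-mode interrupt/exception dispatch: push FLAGS, CS and IP, clear
 * IF/TF/AC, then fetch the new CS:IP from the real-mode IVT, where each
 * vector occupies four bytes (IP at vector*4, CS at vector*4 + 2).
 */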
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not yet implemented */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not yet implemented */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

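/*
 * CMPXCHG8B: if EDX:EAX equals the 64-bit destination, ZF is set and
 * ECX:EBX is stored to it; otherwise ZF is cleared and the old value is
 * loaded into EDX:EAX.  The 16-byte CMPXCHG16B form is not handled.
 */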
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = 0x80000001;
	ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return edx & bit(X86_FEATURE_LM);
}

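/*
 * GET_SMSTATE reads one field of the given type from the SMRAM save
 * state area.  It is a statement expression so that a failed read can
 * bail out of the calling function with X86EMUL_UNHANDLEABLE.
 */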
#define GET_SMSTATE(type, smbase, offset)				  \
	({								  \
	 type __val;							  \
	 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
				      sizeof(__val));			  \
	 if (r != X86EMUL_CONTINUE)					  \
		 return X86EMUL_UNHANDLEABLE;				  \
	 __val;								  \
	})

static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}

static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector =                GET_SMSTATE(u16, smbase, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				     u64 cr0, u64 cr4)
{
	int bad;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr4;
	int i;

	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smbase, 0x7fcc);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7fc8);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr4);
}

static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);

	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u32, smbase, 0x7f68);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7f60);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
	ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);

	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}

static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	u64 smbase;
	int ret;

	if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG.  */
		if (cr4 & X86_CR4_PCIDE) {
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
			cr4 &= ~X86_CR4_PCIDE;
		}

		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA.  */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
	if (cr4 & X86_CR4_PAE)
		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

	/* And finally go back to 32-bit mode.  */
	efer = 0;
	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);

	smbase = ctxt->ops->get_smbase(ctxt);
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
	else
		ret = rsm_load_state_32(ctxt, smbase + 0x8000);

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

	if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
	ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
	return X86EMUL_CONTINUE;
}

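/*
 * SYSCALL/SYSENTER and their returns use fixed, flat segments: base 0,
 * 4GB limit, 4kb granularity.  Only the selector, the DPL and, for the
 * code segment, the D/L bits vary, and the callers patch those up.
 */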
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode.
	 * A 32-bit compat application running under a 64-bit guest will
	 * therefore #UD.  While this behaviour could be emulated away by
	 * giving the AMD response, AMD CPUs can't behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

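/*
 * SYSCALL loads CS and SS from MSR_STAR: bits 47:32 hold the kernel CS
 * selector and SS is implied as that value + 8.  In long mode the
 * return RIP goes to RCX, RFLAGS to R11, and the entry point comes
 * from MSR_LSTAR (or MSR_CSTAR for a compat-mode caller).
 */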
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;

	return X86EMUL_CONTINUE;
}

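/*
 * SYSEXIT derives the user selectors from MSR_IA32_SYSENTER_CS: the
 * 32-bit exit uses CS = base + 16, the 64-bit exit (REX.W) uses
 * CS = base + 32, and in both cases SS = CS + 8 with RPL forced to 3.
 * The return RIP comes from RDX and the user RSP from RCX.
 */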
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;

	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

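/*
 * Check the I/O permission bitmap in the TSS.  The word at offset 102
 * of the TSS holds the offset of the bitmap; the port's bit (and the
 * following len-1 bits) must all be clear for access to be allowed.
 * For example, port 0x3f8 with len 1 tests bit 0 of the byte at
 * base + io_bitmap_ptr + 0x7f.
 */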
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite a strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through */
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);

	return ret;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

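/*
 * Common task-switch emulation: fetch the old and new TSS descriptors,
 * check privilege and the new descriptor's limit, save the outgoing
 * context into the old TSS, load the incoming context from the new one,
 * then update the busy flags, EFLAGS.NT, CR0.TS and TR, and push the
 * error code if the switch was caused by a faulting exception.
 */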
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr, dr7;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is set
	 * in eflags; note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}

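/*
 * DAS adjusts AL after a packed-BCD subtraction: subtract 6 if the low
 * nibble overflowed (or AF is set) and 0x60 if the high nibble did (or
 * CF was set).  For example, 0x23 - 0x08 leaves AL = 0x1b; DAS turns
 * that into the correct BCD result 0x15.
 */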
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

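/*
 * AAM splits AL by an arbitrary base (10 for the plain opcode):
 * AH = AL / imm8, AL = AL % imm8.  A zero immediate raises #DE.
 * E.g. AL = 0x2f (47) with the default base gives AH = 4, AL = 7.
 */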
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

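/*
 * AAD is the inverse of AAM: AL = AH * imm8 + AL, AH = 0.  E.g. with
 * the default base 10, AH = 4 and AL = 7 become AL = 47 (0x2f).
 */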
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
	enum x86emul_mode prev_mode = ctxt->mode;

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/*
	 * If we failed, we tainted the memory, but at the very least we
	 * should restore cs.
	 */
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
		goto fail;
	}
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	return rc;

}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

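/*
 * CWD/CDQ/CQO sign-extend rAX into rDX: every bit of rDX becomes a copy
 * of the sign bit, e.g. AX = 0x8000 yields DX = 0xffff.
 */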
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
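	/* Only the 16-bit selector is ever stored to memory. */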
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
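	/*
	 * A 16-bit SGDT/SIDT stores only the low 24 bits of the base:
	 * widen op_bytes to 4 and clear the top byte before the write.
	 */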
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    is_noncanonical_address(desc_ptr.address))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
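	/*
	 * LMSW writes only the low four bits of CR0 (PE/MP/EM/TS).  The
	 * old PE bit is kept and OR-ed with the source, so LMSW can set
	 * PE but never clear it.
	 */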
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
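	/*
	 * 0xe0/0xe1/0xe2 are loopne/loope/loop; XOR-ing the opcode with
	 * 0x5 maps loopne/loope onto the "ne"/"e" conditions of test_cc().
	 */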
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

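	/*
	 * SAHF loads CF/PF/AF/ZF/SF from AH; the other low-byte flag
	 * positions are cleared, except bit 1 which is always set.
	 */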
	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
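		/*
		 * Masking out CR3_PCID_INVD permits bit 63 (the PCID
		 * no-flush bit) to be set in long mode.
		 */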
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
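	/* DR4/DR5 are reserved when CR4.DE is set; otherwise they alias DR6/DR7. */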
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~15;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
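
/*
 * Decode-table constructors: D() is a decode-only entry, I() attaches an
 * ->execute handler and F() a fastop; DI()/II() add an SVM intercept and the
 * *IP variants a ->check_perm hook; G/GD/EXT/E/GP/ID/MD select sub-tables
 * keyed on ModRM bits, an escape byte, a SIMD prefix or the CPU mode.
 */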

#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock,		em_inc),
	F(DstMem | SrcNone | Lock,		em_dec),
	I(SrcMem | NearBranch,			em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps,		em_call_far),
	I(SrcMem | NearBranch,			em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
	I(SrcMem | Stack,			em_push), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot | DstMem,	sldt),
	DI(Prot | DstMem,	str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};

static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Insns below are selected by the prefix, and the table is indexed by the
 * third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
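	/* 64-bit ops take at most a 32-bit immediate, sign-extended when used. */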
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
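		/* 16 bytes for CMPXCHG16B, otherwise a plain 64-bit operand. */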
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

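	/*
	 * RIP-relative operands are relative to the end of the instruction,
	 * so the fixup must wait until _eip points past the fetched bytes.
	 */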
	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/* The second termination condition only applies to REPE
	 * and REPNE.  Test whether the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and, if so, check the corresponding
	 * termination condition:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
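	/*
	 * fwait raises any exception left pending in the x87 unit; the
	 * exception-table fixup at label 3 records it in 'fault' instead
	 * of letting the host take an unhandled fault.
	 */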
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
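
/*
 * Fastop handlers are small asm stubs called with operands in fixed
 * registers: dst in *ax, src in *dx, src2 in *cx, with EFLAGS shuttled in
 * and out around the call.  The size variants of each op are laid out
 * FASTOP_SIZE bytes apart, hence the __ffs(dst.bytes) offset below.
 */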

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	register void *__sp asm(_ASM_SP);
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop), "+r"(__sp)
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
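	/*
	 * Zero everything from rip_relative up to (but not including) modrm
	 * in one memset; this relies on the field order in
	 * struct x86_emulate_ctxt.
	 */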
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;


	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B.  */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding will be reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;
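
	/*
	 * For string instructions, advance RSI/RDI by the element size;
	 * string_addr_inc() moves the pointer up or down depending on
	 * EFLAGS.DF.
	 */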
	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

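	/*
	 * REP-prefixed string instructions are emulated in chunks: RCX is
	 * decremented by the number of elements just processed, then the
	 * instruction either restarts for the next chunk or finishes early
	 * on a REPE/REPNE termination condition (ZF).  An exhausted RCX is
	 * noticed when the restarted instruction is decoded again.
	 */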
	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	/* The instruction has fully retired; commit the advanced RIP. */
	ctxt->eip = ctxt->_eip;

done:
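	/*
	 * Vectors 0x00-0x1f are the architecturally defined exceptions;
	 * propagating anything above that range would indicate an
	 * emulator bug.
	 */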
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
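	/*
	 * For a false CMOV condition, a 32-bit destination register is
	 * still zero-extended in 64-bit mode, so writeback is suppressed
	 * only when op_bytes != 4.
	 */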
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
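	/*
	 * For Jcc/SETcc (and CMOVcc above), the low nibble of the opcode
	 * selects the condition code that test_cc() checks against EFLAGS.
	 */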
	case 0x80 ... 0x8f: /* jnz rel, etc. */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
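	/*
	 * MOVZX/MOVSX widen an 8- or 16-bit source to the full operand
	 * size; the cast selects zero- versus sign-extension.
	 */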
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

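/*
 * Thin exported wrappers so callers outside the emulator can manage its
 * cached copy of the guest register file: invalidation forces registers
 * to be re-read from the vcpu, writeback flushes dirty values out.
 */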
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}