/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)
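
/*
 * Illustration (not part of the original macros): each operand slot is a
 * 5-bit OpXxx code packed into the 56-bit flags word, so an entry built
 * as (DstReg | SrcMem) carries OpReg in bits 1-5 and OpMem in bits 6-10,
 * and a decoder can recover each slot with, e.g.,
 * (ctxt->d >> DstShift) & OpMask.
 */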

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
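
/*
 * For example, X4(0x90) expands to 0x90, 0x90, 0x90, 0x90; these helpers
 * fill runs of identical entries in the opcode tables later in the file.
 */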

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;
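
/*
 * Illustrative sketch (not original code): since every fastop body below
 * is aligned to FASTOP_SIZE (8) bytes and emitted in b/w/l/q order, the
 * 16-bit variant of an op lives at em_op + 8, the 32-bit one at +16 and
 * the 64-bit one at +24, so a caller can pick the width arithmetically:
 *
 *	void (*fop)(struct fastop *) =
 *		(void *)em_add + FASTOP_SIZE * ilog2(ctxt->dst.bytes);
 *	rc = fastop(ctxt, fop);
 */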

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};
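
/*
 * Illustrative note: an opcode flagged GroupDual points u.gdual at one of
 * these pairs, and decode picks gdual->mod3[...] when the ModRM mod field
 * is 3 and gdual->mod012[...] otherwise, indexed by the ModRM reg field;
 * instr_dual and mode_dual are the two-entry analogues keyed on mod == 3
 * and on 32- vs 64-bit mode respectively.
 */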

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
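
/*
 * Usage sketch (illustrative): the GPR cache above is lazy.  An emulation
 * pass reads registers on demand through reg_read()/reg_rmw(), which call
 * ->read_gpr() at most once per register, and finishes with
 * writeback_registers() so that only registers marked in regs_dirty are
 * pushed back through ->write_gpr().
 */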

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_FUNC("em_" #op)

#define FOP_END \
	    ".popsection")

#define FOPNOP() \
	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
	FOP_RET

#define FOP1E(op,  dst) \
	FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	FOP_RET

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing a
 * page boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes are
	 * available by now, we must have hit the 15-byte limit.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
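
/*
 * Example use (as in the ModR/M decoder below): both helpers jump to the
 * enclosing function's "done" label on a failed fetch, so a caller reads
 * a signed 8-bit displacement simply as
 *
 *	modrm_ea += insn_fetch(s8, ctxt);
 *
 * and lets "done:" propagate rc.
 */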

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop selects a byte-sized operand; without a REX prefix, encodings
 * 4-7 of a byte operand decode to the high-byte registers AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
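
/*
 * Illustrative note: the FOP_SETCC table above spaces its sixteen entries
 * 4 bytes apart (".align 4", and "setcc %al; ret" fits in four bytes), so
 * test_cc() can index it as em_setcc + 4 * (condition & 0xf); the low
 * nibble of a SETcc opcode is the x86 condition code.
 */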

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

	memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					     ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
1798
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1799 1800
}

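/*
 * POPF: which EFLAGS bits may actually change depends on mode and
 * privilege.  IOPL is writable only at CPL 0 and IF only when
 * CPL <= IOPL; in VM86 mode an IOPL below 3 faults, while real mode
 * may change both freely.
 */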
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

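/* ENTER: only nesting level 0 (push RBP and set up a new frame) is handled. */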
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

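/* POPA: the saved SP image is skipped; RSP is simply advanced past it. */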
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

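/*
 * Real-mode interrupt: push FLAGS, CS and IP, clear IF/TF/AC, then
 * fetch the new CS:IP from the 4-byte IVT entry for @irq.
 */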
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

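/*
 * Real-mode IRET: pop IP, CS and FLAGS in that order, reload CS, and
 * merge the popped flags according to the operand size.
 */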
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

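/*
 * CMPXCHG8B: if EDX:EAX equals the old memory value, ZF is set and
 * ECX:EBX is stored; otherwise ZF is cleared and the old value is
 * returned in EDX:EAX.  The 16-byte variant is not handled.
 */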
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = 0x80000001;
	ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return edx & bit(X86_FEATURE_LM);
}

#define GET_SMSTATE(type, smbase, offset)				  \
	({								  \
	 type __val;							  \
	 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
				      sizeof(__val));			  \
	 if (r != X86EMUL_CONTINUE)					  \
		 return X86EMUL_UNHANDLEABLE;				  \
	 __val;								  \
	})

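/*
 * Unpack segment attributes from the packed 32-bit layout used in the
 * SMM state-save area: type/S/DPL/P in bits 8-15, AVL/L/D/G in
 * bits 20-23.
 */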
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}

static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector =                GET_SMSTATE(u16, smbase, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				     u64 cr0, u64 cr4)
{
	int bad;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr4;
	int i;

	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smbase, 0x7fcc);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7fc8);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr4);
}

static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);

	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u32, smbase, 0x7f68);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7f60);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
	ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);

	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}

static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	u64 smbase;
	int ret;

	if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG.  */
		if (cr4 & X86_CR4_PCIDE) {
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
			cr4 &= ~X86_CR4_PCIDE;
		}

		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA.  */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
	if (cr4 & X86_CR4_PAE)
		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

	/* And finally go back to 32-bit mode.  */
	efer = 0;
	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);

	smbase = ctxt->ops->get_smbase(ctxt);
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
	else
		ret = rsm_load_state_32(ctxt, smbase + 0x8000);

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

	if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
	ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
	return X86EMUL_CONTINUE;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * Remark: Intel CPUs only support "syscall" in 64-bit long mode.
	 * A 64-bit guest running a 32-bit compat app will #UD!  While
	 * this behaviour could be fixed by emulating the AMD response,
	 * AMD CPUs can't be made to behave like Intel ones.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

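/*
 * SYSCALL: CS/SS are derived from MSR_STAR[47:32].  In long mode the
 * return RIP goes to RCX, RFLAGS to R11, the target comes from
 * MSR_LSTAR/MSR_CSTAR and flags are masked with MSR_SYSCALL_MASK; in
 * legacy mode the target EIP comes from MSR_STAR[31:0].
 */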
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

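/*
 * Consult the I/O permission bitmap in the TSS: its offset is held at
 * byte 102 of the TSS, and the access is allowed only if every bit
 * covering ports [port, port + len) is clear.
 */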
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through */
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg),
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg),
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg),
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof(tss_seg.prev_task_link),
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);

	return ret;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg),
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg),
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof(tss_seg.prev_task_link),
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr, dr7;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}

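/*
 * DAS: decimal adjust AL after subtraction.  The trailing OR with an
 * immediate zero only serves to let fastop recompute PF/ZF/SF; AF and
 * CF are then patched in by hand.
 */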
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
	enum x86emul_mode prev_mode = ctxt->mode;

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but the very least we should
	   restore cs */
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
		goto fail;
	}
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	return rc;

}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

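/* CWD/CDQ/CQO: replicate the sign bit of the accumulator into rDX. */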
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

3565 3566
static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
3567
	if (ctxt->modrm_reg > VCPU_SREG_GS)
3568 3569
		return emulate_ud(ctxt);

3570
	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3571 3572
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
3573 3574 3575 3576 3577
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
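		/* 16-bit SGDT/SIDT stores only the low 24 bits of the base. */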
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
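	/* In 64-bit mode the new descriptor-table base must be canonical. */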
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    is_noncanonical_address(desc_ptr.address))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
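	/* LMSW writes only CR0[3:0] and can set, but never clear, CR0.PE. */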
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
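	/*
	 * LOOP (0xe2) always continues while RCX != 0.  For LOOPE (0xe1)
	 * and LOOPNE (0xe0), XORing the opcode with 0x5 yields the Jcc
	 * condition code test_cc() expects: 0x4 (ZF set) and 0x5 (ZF clear).
	 */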
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
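	/* op_bytes of 2 or 4: 16-bit BSWAP is undefined, so do a 32-bit swap. */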
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulate clflush as a nop, regardless of CPUID */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
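	/* With CR4.DE set, DR4/DR5 no longer alias DR6/DR7 and raise #UD. */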
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~15;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

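/*
 * Decode-table constructors: D() is a flags-only entry, I()/F() attach an
 * execute callback (F() for fastop-style ALU helpers), G()/GD()/E()/GP()
 * point at group, group-dual, escape and prefix sub-tables, and the
 * *2bv/*IP variants expand to byte+word pairs or add permission checks.
 */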
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
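	/* ModRM /6 is an undocumented alias of SHL (a.k.a. SAL). */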
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock,		em_inc),
	F(DstMem | SrcNone | Lock,		em_dec),
	I(SrcMem | NearBranch,			em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps,		em_call_far),
	I(SrcMem | NearBranch,			em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
	I(SrcMem | Stack,			em_push), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot | DstMem,	sldt),
	DI(Prot | DstMem,	str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};

static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Insns below are selected by the prefix and indexed by the third
 * opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}
		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}
	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;
	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

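	/* RIP-relative operands are relative to the end of the instruction. */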
	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/* The second termination condition applies only to REPE
	 * and REPNE.  If the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ, test the corresponding
	 * termination condition:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5059
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5060
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5061
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5062 5063 5064 5065 5066
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
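	/*
	 * fwait raises any pending x87 exception as #MF here; the exception
	 * fixup catches it and sets 'fault' so the math fault can be
	 * re-injected into the guest instead of killing the host.
	 */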
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
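	/*
	 * Each fastop routine has per-size variants spaced FASTOP_SIZE bytes
	 * apart, so advance fop by log2(operand size) slots.  Guest arithmetic
	 * flags are loaded into EFLAGS around the call and read back after.
	 */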
	register void *__sp asm(_ASM_SP);
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop), "+r"(__sp)
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

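	/* Far-pointer loads (e.g. LES/LDS) require a memory source operand. */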
	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}
A
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
A
			goto done;
A
			rc = emulate_nm(ctxt);
5160
			goto done;
5161
		}
5162

5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175
		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}
5176

5177
		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5178 5179 5180 5181 5182
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}
		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;


	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B.  */
	ctxt->dst.orig_val64 = ctxt->dst.val64;
special_insn:

	if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}