/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

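/*
 * Illustrative note (a sketch, not part of the original source): each
 * operand slot packs one of the Op* codes above into a 5-bit field of
 * the opcode flags word, so e.g. the destination type is recovered as
 *
 *	(ctxt->d >> DstShift) & OpMask
 *
 * using the Dst/Src/Src2 shift values defined below.
 */
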
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

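/*
 * Illustrative note (a sketch, not part of the original source): the
 * X*() macros repeat their argument list, so X4(0) expands to
 * "0, 0, 0, 0"; they exist to keep the opcode tables further down
 * compact.
 */
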
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

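/*
 * Illustrative sketch (an assumption for exposition, not part of the
 * original source): since each em_<op> size variant is FASTOP_SIZE
 * bytes, the dispatcher can select the b/w/l/q variant by pointer
 * arithmetic instead of a jump table, roughly:
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * where __ffs maps operand sizes 2/4/8 to offsets 1/2/3.
 */
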
struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

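/*
 * Illustrative note (a sketch, not part of the original source): the GPR
 * cache above makes repeated reads cheap and defers writes, e.g.
 *
 *	ulong ax = reg_read(ctxt, VCPU_REGS_RAX);  // ->read_gpr at most once
 *	*reg_write(ctxt, VCPU_REGS_RAX) = ax + 1;  // marks RAX dirty
 *	writeback_registers(ctxt);                 // flushes dirty regs only
 */
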
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

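/*
 * Illustrative note (a sketch, not part of the original source):
 * ad_mask() turns the address size into a wrap-around mask, e.g.
 * ad_bytes == 2 gives (1UL << 16) - 1 == 0xffff, so 16-bit address
 * arithmetic wraps at 64KiB as real-mode code expects.
 */
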
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*linear = la;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		la &= (u32)-1;
		break;
	}
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

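/*
 * Illustrative note (a sketch, not part of the original source): for an
 * expand-down data segment the valid offsets lie *above* the scaled
 * limit, e.g. limit 0x0fff with d=0 makes 0x1000..0xffff usable, which
 * is why __linearize inverts the limit test for that segment type.
 */
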
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

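/*
 * Illustrative note (a sketch, not part of the original source): the
 * "15UL ^ cur_size" above relies on cur_size never exceeding 15, the
 * architectural instruction-length limit, so the XOR is a cheap
 * 15 - cur_size, e.g. cur_size == 3 yields 12 bytes left to fetch.
 */
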
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})

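/*
 * Illustrative note (a sketch, not part of the original source):
 * insn_fetch() is a statement expression that assumes a local "rc" and
 * a "done:" label in the caller, matching the decoder's usage pattern:
 *
 *	sib = insn_fetch(u8, ctxt);	// may "goto done" on fetch fault
 */
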
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

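/*
 * Illustrative note (a sketch, not part of the original source): the
 * SETcc stubs emitted by FOP_SETCC() are 4 bytes each and laid out in
 * condition-code order, so condition 0x4 (ZF) lands on setz:
 *
 *	fop = (void *)em_setcc + 4 * 0x4;	// "setz %al; ret"
 */
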
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

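/*
 * Illustrative note (a sketch, not part of the original source): for BT
 * and friends with a register bit index and a memory destination, the
 * index may exceed the operand width; e.g. bit index 35 on a 32-bit
 * operand advances the effective address by (35 & ~31) >> 3 == 4 bytes
 * and leaves bit offset 35 & 31 == 3 in src.val.
 */
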
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

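/*
 * Illustrative note (a sketch, not part of the original source): for
 * e.g. "rep insb" the refill above pulls up to min3(bytes left in the
 * destination page, sizeof(rc->data), RCX) bytes through a single
 * ->pio_in_emulated() call, and later iterations are served from the
 * cache until rc->pos catches up with rc->end.
 */
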
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

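/*
 * Illustrative note (a sketch, not part of the original source): bit 2
 * of a selector is the TI (table indicator) flag, so selector 0x000f
 * (index 1, TI=1, RPL=3) is resolved against the LDT, while 0x0008
 * names GDT entry 1.
 */
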
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
				   &ctxt->exception);
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					     ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

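/*
 * Illustrative note (a sketch, not part of the original source): with a
 * LOCK prefix the memory operand is committed via segmented_cmpxchg()
 * using op->orig_val as the expected old value, approximating the
 * atomic read-modify-write semantics of e.g. "lock add %eax, (mem)".
 */
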
1750
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1751
{
1752
	struct segmented_address addr;
1753

1754
	rsp_increment(ctxt, -bytes);
1755
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1756 1757
	addr.seg = VCPU_SREG_SS;

1758 1759 1760 1761 1762
	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
1763
	/* Disable writeback. */
1764
	ctxt->dst.type = OP_NONE;
1765
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1766
}
1767

1768 1769 1770 1771
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
1772
	struct segmented_address addr;
1773

1774
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1775
	addr.seg = VCPU_SREG_SS;
1776
	rc = segmented_read(ctxt, addr, dest, len);
1777 1778 1779
	if (rc != X86EMUL_CONTINUE)
		return rc;

1780
	rsp_increment(ctxt, len);
1781
	return rc;
1782 1783
}

1784 1785
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
1786
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1787 1788
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

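/*
 * Example of the POPF masking above: at CPL 3 with IOPL 0, neither IF
 * nor IOPL is in change_mask, so a popped value that flips them is
 * silently ignored; at CPL 0 both bits become writable.
 */
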
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

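/*
 * Example: "enter 16, 0" pushes the old frame pointer, points RBP at
 * the saved value and reserves 16 bytes of locals by dropping RSP.
 * Non-zero nesting levels are rare enough that emulation punts them
 * back to the caller as X86EMUL_UNHANDLEABLE.
 */
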
static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		/* PUSHA stores the pre-push SP value for the SP slot itself */
		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
				old_esp : reg_read(ctxt, reg);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

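/*
 * Worked example of the IVT lookup above: in real mode, vector 0x10
 * occupies the four bytes at dt.address + 0x40, laid out as a 16-bit
 * IP followed by a 16-bit CS, so eip_addr = base + 0x40 and
 * cs_addr = base + 0x42.
 */
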
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}

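/*
 * CMPXCHG8B semantics implemented above: if EDX:EAX matches the 64-bit
 * memory operand, ZF is set and ECX:EBX is written back; otherwise ZF
 * is cleared and the memory value is loaded into EDX:EAX.
 */
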
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = 0x80000001;
	ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return edx & bit(X86_FEATURE_LM);
}

#define GET_SMSTATE(type, smbase, offset)				  \
	({								  \
	 type __val;							  \
	 int r = ctxt->ops->read_phys(ctxt, smbase + offset, &__val,      \
				      sizeof(__val));			  \
	 if (r != X86EMUL_CONTINUE)					  \
		 return X86EMUL_UNHANDLEABLE;				  \
	 __val;								  \
	})

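/*
 * Example use of GET_SMSTATE: GET_SMSTATE(u32, smbase, 0x7ff4) reads a
 * 32-bit value from guest physical address smbase + 0x7ff4, and the
 * statement expression makes the enclosing function return
 * X86EMUL_UNHANDLEABLE if the read fails.
 */
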
static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}

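/*
 * The flags word decoded above mirrors the high dword of a descriptor:
 * G is bit 23, D/B bit 22, L bit 21, AVL bit 20, P bit 15, DPL bits
 * 14:13, S bit 12 and the type field bits 11:8.
 */
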
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smbase, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smbase, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector =                GET_SMSTATE(u16, smbase, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smbase, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smbase, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
	base3 =                   GET_SMSTATE(u32, smbase, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}

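/*
 * Note on the offsets used above: the 32-bit SMM state-save map that
 * this code assumes keeps segment state in packed 12-byte records
 * below smbase + 0x8000, while the 64-bit map uses 16-byte records at
 * 0x7e00 + n * 16 holding the selector, attributes, limit, base and
 * high base dword in turn.
 */
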
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				     u64 cr0, u64 cr4)
{
	int bad;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr4;
	int i;

	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smbase, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smbase, 0x7fcc);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7fc8);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc4);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f64));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f60));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector =                 GET_SMSTATE(u32, smbase, 0x7fc0);
	set_desc_base(&desc,       GET_SMSTATE(u32, smbase, 0x7f80));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smbase, 0x7f7c));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smbase, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f74);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address =               GET_SMSTATE(u32, smbase, 0x7f58);
	dt.size =                  GET_SMSTATE(u32, smbase, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smbase, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr4);
}

static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);

	ctxt->_eip   = GET_SMSTATE(u64, smbase, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u32, smbase, 0x7f68);
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
	val = GET_SMSTATE(u32, smbase, 0x7f60);
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
	ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);

	selector =                  GET_SMSTATE(u32, smbase, 0x7e90);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e92) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e94));
	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e98));
	base3 =                     GET_SMSTATE(u32, smbase, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e84);
	dt.address =                GET_SMSTATE(u64, smbase, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector =                  GET_SMSTATE(u32, smbase, 0x7e70);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smbase, 0x7e72) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smbase, 0x7e74));
	set_desc_base(&desc,        GET_SMSTATE(u32, smbase, 0x7e78));
	base3 =                     GET_SMSTATE(u32, smbase, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size =                   GET_SMSTATE(u32, smbase, 0x7e64);
	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}

static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	u64 smbase;
	int ret;

	if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG.  */
		if (cr4 & X86_CR4_PCIDE) {
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
			cr4 &= ~X86_CR4_PCIDE;
		}

		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA.  */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	/* Now clear CR4.PAE (which must be done before clearing EFER.LME).  */
	if (cr4 & X86_CR4_PAE)
		ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

	/* And finally go back to 32-bit mode.  */
	efer = 0;
	ctxt->ops->set_msr(ctxt, MSR_EFER, efer);

	smbase = ctxt->ops->get_smbase(ctxt);
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, smbase + 0x8000);
	else
		ret = rsm_load_state_32(ctxt, smbase + 0x8000);

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

	if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
	ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
	return X86EMUL_CONTINUE;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long mode,
	 * so a 64-bit guest running a 32-bit compat app will #UD. While
	 * this behaviour could be emulated away to match the AMD
	 * response, AMD CPUs cannot be made to behave like Intel here.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

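/*
 * Layout of MSR_STAR relied on below: bits 47:32 give the SYSCALL CS
 * selector (SS is that value + 8) and bits 63:48 give the SYSRET
 * selector base. For example, STAR = 0x0023001000000000 makes SYSCALL
 * load CS = 0x10 and SS = 0x18.
 */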
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

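/*
 * Worked example for the bitmap check above: the I/O map base lives at
 * TSS offset 102 and each port is one bit. For port 0x3f8 with len 1,
 * the permission word is read at base + io_bitmap_ptr + 0x7f and bit 0
 * (0x3f8 & 7) must be clear for access to be allowed.
 */
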
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through */
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);

	return ret;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr, dr7;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}

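/*
 * Example of the string-op stepping above: a REP MOVSD batch with
 * op->count = 8 and DF clear advances the index register by 8 * 4 = 32
 * bytes in one go; with DF set the same batch walks downward by 32.
 */
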
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

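/*
 * Worked DAS example: with AL = 0x4b and AF/CF clear, the low nibble
 * 0xb exceeds 9, so 6 is subtracted, giving the packed BCD value 0x45
 * with AF set; the high nibble needs no 0x60 correction, so CF stays
 * clear.
 */
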
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

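/*
 * Worked AAM example: with the default divisor 10 and AL = 37 (0x25),
 * the split above yields AH = 3 and AL = 7, i.e. AX = 0x0307; a
 * divisor of 0 raises #DE.
 */
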
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

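/*
 * Worked AAD example: with the default multiplier 10 and AH = 3,
 * AL = 7, the fold above gives AL = 3 * 10 + 7 = 37 (0x25) and clears
 * AH.
 */
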
static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
	enum x86emul_mode prev_mode = ctxt->mode;

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but the very least we should
	   restore cs */
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
		goto fail;
	}
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

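/*
 * The sign-replication trick above: for AX = 0x8000 the sign bit is 1,
 * so ~(1 - 1) = ~0 fills DX with 0xffff; for a non-negative source the
 * expression collapses to ~(0 - 1) = 0.
 */
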
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

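/*
 * Common helper for SGDT/SIDT: store the 2-byte limit followed by the
 * base address, whose top byte is masked off for 16-bit operand size.
 */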
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    is_noncanonical_address(desc_ptr.address))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

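/*
 * LOOP/LOOPE/LOOPNE: decrement *CX and branch while it is non-zero;
 * for LOOPE/LOOPNE the required ZF state is derived from the opcode
 * via test_cc(ctxt->b ^ 0x5).
 */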
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

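/* CPUID: leaf in EAX, sub-leaf in ECX; all four output GPRs are written. */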
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

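/*
 * BSWAP with a 16-bit operand is undefined on real hardware; anything
 * other than a 64-bit operand is handled here as a 32-bit byte swap.
 */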
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~15;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

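/*
 * Shorthand constructors for opcode-table entries: D() is decode-only,
 * I()/F() attach an execution callback (F for fastop stubs), G()/GD()/E()
 * redirect through group, group-dual and escape tables, and the *2bv
 * variants expand to a byte-op/full-op pair.
 */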
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock,		em_inc),
	F(DstMem | SrcNone | Lock,		em_dec),
	I(SrcMem | NearBranch,			em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps,		em_call_far),
	I(SrcMem | NearBranch,			em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
	I(SrcMem | Stack,			em_push), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot | DstMem,	sldt),
	DI(Prot | DstMem,	str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};

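/* Main one-byte opcode table, indexed by the primary opcode byte. */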
static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * The insns below are indexed by the third opcode byte and then selected
 * by the mandatory prefix, if any.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

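/*
 * Decode one operand according to the OpXXX type extracted from ctxt->d;
 * all memory-style operands funnel through the common OpMem path.
 */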
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

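/*
 * Top-level decoder: consume legacy and REX prefixes, look the opcode up
 * in the tables above, resolve group/dual/prefix redirections, then
 * decode the source, second source and destination operands.
 */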
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;
	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;
		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/* The second termination condition only applies to REPE
	 * and REPNE. If the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ, test the corresponding
	 * termination condition:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

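/*
 * Dispatch to a size-specialized fastop stub: guest flags are loaded
 * around the call, dst/src/src2 travel in fixed registers, and a NULL
 * stub pointer on return signals a divide error.
 */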
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

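/*
 * Execute one previously decoded instruction: run the pre-execution
 * checks (LOCK, privilege, mode, intercepts), fetch memory operands,
 * dispatch to the handler or fastop stub, then write results back.
 */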
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;


	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B.  */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}