emulate.c 130.9 KB
Newer Older
A
Avi Kivity 已提交
1
/******************************************************************************
2
 * emulate.c
A
Avi Kivity 已提交
3 4 5 6 7 8
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9
 * privileged instructions:
A
Avi Kivity 已提交
10 11
 *
 * Copyright (C) 2006 Qumranet
N
Nicolas Kaiser 已提交
12
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
A
Avi Kivity 已提交
13 14 15 16 17 18 19 20 21 22
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

23
#include <linux/kvm_host.h>
24
#include "kvm_cache_regs.h"
A
Avi Kivity 已提交
25
#include <linux/module.h>
26
#include <asm/kvm_emulate.h>
27
#include <linux/stringify.h>
A
Avi Kivity 已提交
28

29
#include "x86.h"
30
#include "tss.h"
31

32 33 34
/*
 * Operand types
 */
35 36 37 38 39 40 41 42 43
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
44 45 46
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
47
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
48 49 50 51 52 53 54
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
55 56 57 58 59 60
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
61
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
62
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
P
Paolo Bonzini 已提交
63
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
64 65
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
66 67

#define OpBits             5  /* Width of operand field */
68
#define OpMask             ((1ull << OpBits) - 1)
69

A
Avi Kivity 已提交
70 71 72 73 74 75 76 77 78 79
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
80
#define ByteOp      (1<<0)	/* 8-bit operands. */
A
Avi Kivity 已提交
81
/* Destination operand type. */
82 83 84 85 86 87 88
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
89
#define DstMem16    (OpMem16 << DstShift)
90 91
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
92
#define DstAccLo    (OpAccLo << DstShift)
93
#define DstMask     (OpMask << DstShift)
A
Avi Kivity 已提交
94
/* Source operand type. */
95 96 97 98 99 100 101 102 103 104 105 106
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
P
Paolo Bonzini 已提交
107
#define SrcXLat     (OpXLat << SrcShift)
108 109 110 111
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
112
#define SrcImm64    (OpImm64 << SrcShift)
113
#define SrcDX       (OpDX << SrcShift)
114
#define SrcMem8     (OpMem8 << SrcShift)
115
#define SrcAccHi    (OpAccHi << SrcShift)
116
#define SrcMask     (OpMask << SrcShift)
117 118 119 120 121 122 123 124 125
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
126
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
127
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
128
#define Sse         (1<<18)     /* SSE Vector instruction */
129 130 131 132
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
133
/* Misc flags */
134
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
135
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
136
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
137
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
138
#define Undefined   (1<<25) /* No Such Instruction */
139
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
140
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
141
#define No64	    (1<<28)
142
#define PageTable   (1 << 29)   /* instruction used to write page table */
143
#define NotImpl     (1 << 30)   /* instruction is not implemented */
144
/* Source 2 operand type */
145
#define Src2Shift   (31)
146
#define Src2None    (OpNone << Src2Shift)
147
#define Src2Mem     (OpMem << Src2Shift)
148 149 150 151
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
152 153 154 155 156 157
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
158
#define Src2Mask    (OpMask << Src2Shift)
A
Avi Kivity 已提交
159
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
160 161 162
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
163
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
164
#define NoWrite     ((u64)1 << 45)  /* No writeback */
165
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
166
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
167 168
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
169
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
170
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
171
#define NearBranch  ((u64)1 << 52)  /* Near branches */
172
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
A
Avi Kivity 已提交
173

174
#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
A
Avi Kivity 已提交
175

176 177 178 179 180 181 182 183
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
184

185 186 187 188 189 190
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
191 192
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
193 194
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
195
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
196 197 198 199 200 201 202 203 204 205 206
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

207
struct opcode {
208 209
	u64 flags : 56;
	u64 intercept : 8;
210
	union {
211
		int (*execute)(struct x86_emulate_ctxt *ctxt);
212 213 214
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
215
		const struct escape *esc;
216
		const struct instr_dual *idual;
217
		void (*fastop)(struct fastop *fake);
218
	} u;
219
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
220 221 222 223 224
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
225 226
};

227 228 229 230 231 232 233
struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

234 235 236 237 238
struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

239 240 241 242 243
struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

A
Avi Kivity 已提交
244
/* EFLAGS bit definitions. */
245 246 247 248
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
249 250
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
251 252
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
A
Avi Kivity 已提交
253 254
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
255
#define EFLG_IF (1<<9)
256
#define EFLG_TF (1<<8)
A
Avi Kivity 已提交
257 258 259 260 261 262
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

263 264 265
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

266 267 268 269 270 271 272
enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

A
Avi Kivity 已提交
309 310 311 312 313 314
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

315 316 317 318 319 320
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

321 322
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

323 324 325 326 327 328 329 330 331 332 333 334 335
#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
            FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

336 337
#define FOPNOP() FOP_ALIGN FOP_RET

338
#define FOP1E(op,  dst) \
339 340 341 342
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
343 344 345 346 347 348 349 350 351

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

352 353 354 355 356 357 358 359 360
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

361 362 363 364 365 366 367 368 369
/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

370 371 372 373 374
#define FOP2E(op,  dst, src)	   \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
375 376 377 378
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
379 380
	FOP_END

381 382 383 384
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
385 386 387
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
388 389
	FOP_END

390 391 392 393 394 395 396 397 398
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

399 400 401 402 403 404 405 406 407
/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

408 409 410 411 412 413 414
#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
415 416 417
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
418 419
	FOP_END

420 421 422
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

423 424 425
asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

P
Paolo Bonzini 已提交
445 446 447
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

448 449 450 451 452 453
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
454 455 456 457 458
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
459
		.dst_val    = ctxt->dst.val64,
460 461 462
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
463 464 465
		.next_rip   = ctxt->eip,
	};

466
	return ctxt->ops->intercept(ctxt, &info, stage);
467 468
}

A
Avi Kivity 已提交
469 470 471 472 473
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

474
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
475
{
476
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
477 478
}

A
Avi Kivity 已提交
479 480 481 482 483 484 485 486 487 488 489
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

A
Avi Kivity 已提交
490 491 492 493 494
static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

A
Avi Kivity 已提交
495
/* Access/update address held in a register, based on addressing mode. */
496
static inline unsigned long
497
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
498
{
499
	if (ctxt->ad_bytes == sizeof(unsigned long))
500 501
		return reg;
	else
502
		return reg & ad_mask(ctxt);
503 504 505
}

static inline unsigned long
506
register_address(struct x86_emulate_ctxt *ctxt, int reg)
507
{
508
	return address_mask(ctxt, reg_read(ctxt, reg));
509 510
}

511 512 513 514 515
static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

516
static inline void
517
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
518
{
519 520
	ulong mask;

521
	if (ctxt->ad_bytes == sizeof(unsigned long))
522
		mask = ~0UL;
523
	else
524
		mask = ad_mask(ctxt);
525
	masked_increment(reg_rmw(ctxt, reg), mask, inc);
526 527 528 529
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
530
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
531
}
A
Avi Kivity 已提交
532

533 534 535 536 537 538 539
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

540
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
541 542 543 544
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

545
	return ctxt->ops->get_cached_segment_base(ctxt, seg);
546 547
}

548 549
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
550
{
551
	WARN_ON(vec > 0x1f);
552 553 554
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
555
	return X86EMUL_PROPAGATE_FAULT;
556 557
}

558 559 560 561 562
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

563
static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
564
{
565
	return emulate_exception(ctxt, GP_VECTOR, err, true);
566 567
}

568 569 570 571 572
static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

573
static int emulate_ud(struct x86_emulate_ctxt *ctxt)
574
{
575
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
576 577
}

578
static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
579
{
580
	return emulate_exception(ctxt, TS_VECTOR, err, true);
581 582
}

583 584
static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
585
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
586 587
}

A
Avi Kivity 已提交
588 589 590 591 592
static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}

636 637 638 639
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
640
				       enum x86emul_mode mode, ulong *linear)
641
{
642 643
	struct desc_struct desc;
	bool usable;
644
	ulong la;
645
	u32 lim;
646
	u16 sel;
647

648
	la = seg_base(ctxt, addr.seg) + addr.ea;
649
	*max_size = 0;
650
	switch (mode) {
651
	case X86EMUL_MODE_PROT64:
652
		if (is_noncanonical_address(la))
653
			goto bad;
654 655 656 657

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
658 659
		break;
	default:
660 661
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
662 663
		if (!usable)
			goto bad;
664 665 666
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
667 668
			goto bad;
		/* unreadable code segment */
669
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
670 671
			goto bad;
		lim = desc_limit_scaled(&desc);
672
		if (!(desc.type & 8) && (desc.type & 4)) {
G
Guo Chao 已提交
673
			/* expand-down segment */
674
			if (addr.ea <= lim)
675 676 677
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
678 679 680
		if (addr.ea > lim)
			goto bad;
		*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
681 682
		if (size > *max_size)
			goto bad;
683
		la &= (u32)-1;
684 685
		break;
	}
686 687
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
688 689
	*linear = la;
	return X86EMUL_CONTINUE;
690 691
bad:
	if (addr.seg == VCPU_SREG_SS)
692
		return emulate_ss(ctxt, 0);
693
	else
694
		return emulate_gp(ctxt, 0);
695 696
}

697 698 699 700 701
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
702
	unsigned max_size;
703 704
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
705 706
}

707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
727 728
}

729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
752

753 754 755 756 757
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
758 759 760
	int rc;
	ulong linear;

761
	rc = linearize(ctxt, addr, size, false, &linear);
762 763
	if (rc != X86EMUL_CONTINUE)
		return rc;
764
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
765 766
}

767
/*
768
 * Prefetch the remaining bytes of the instruction without crossing page
769 770
 * boundary if they are not in fetch_cache yet.
 */
771
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
772 773
{
	int rc;
774
	unsigned size, max_size;
775
	unsigned long linear;
776
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
777
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
778 779
					   .ea = ctxt->eip + cur_size };

780 781 782 783 784 785 786 787 788 789
	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
790 791
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
792 793 794
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

795
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
796
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
797 798 799 800 801 802 803 804

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
805 806
		return emulate_gp(ctxt, 0);

807
	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
808 809 810
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
811
	ctxt->fetch.end += size;
812
	return X86EMUL_CONTINUE;
813 814
}

815 816
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
817
{
818 819 820 821
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
822 823
	else
		return X86EMUL_CONTINUE;
824 825
}

826
/* Fetch next part of the instruction being emulated. */
827
#define insn_fetch(_type, _ctxt)					\
828 829 830
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
831 832
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
833
	ctxt->_eip += sizeof(_type);					\
834 835
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
836
	_x;								\
837 838
})

839
#define insn_fetch_arr(_arr, _size, _ctxt)				\
840 841
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
842 843
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
844
	ctxt->_eip += (_size);						\
845 846
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
847 848
})

849 850 851 852 853
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
854
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
855
			     int byteop)
A
Avi Kivity 已提交
856 857
{
	void *p;
858
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
A
Avi Kivity 已提交
859 860

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
861 862 863
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
A
Avi Kivity 已提交
864 865 866 867
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
868
			   struct segmented_address addr,
A
Avi Kivity 已提交
869 870 871 872 873 874 875
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
876
	rc = segmented_read_std(ctxt, addr, size, 2);
877
	if (rc != X86EMUL_CONTINUE)
A
Avi Kivity 已提交
878
		return rc;
879
	addr.ea += 2;
880
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
A
Avi Kivity 已提交
881 882 883
	return rc;
}

884 885 886 887 888 889 890 891 892 893
FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

894 895
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
896 897
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
898

899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923
FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

924 925
FASTOP2(xadd);

926 927
FASTOP2R(cmp, cmp_r);

928
static u8 test_cc(unsigned int condition, unsigned long flags)
929
{
930 931
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
932

933
	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
934
	asm("push %[flags]; popf; call *%[fastop]"
935 936
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
937 938
}

939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

A
Avi Kivity 已提交
957 958 959 960
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
961 962 963 964 965 966 967 968
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
A
Avi Kivity 已提交
969
#ifdef CONFIG_X86_64
970 971 972 973 974 975 976 977
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
A
Avi Kivity 已提交
978 979 980 981 982 983 984 985 986 987 988
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
989 990 991 992 993 994 995 996
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
A
Avi Kivity 已提交
997
#ifdef CONFIG_X86_64
998 999 1000 1001 1002 1003 1004 1005
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
A
Avi Kivity 已提交
1006 1007 1008 1009 1010 1011
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

A
Avi Kivity 已提交
1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

A
Avi Kivity 已提交
1089
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1090
				    struct operand *op)
1091
{
1092
	unsigned reg = ctxt->modrm_reg;
1093

1094 1095
	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
A
Avi Kivity 已提交
1096

1097
	if (ctxt->d & Sse) {
A
Avi Kivity 已提交
1098 1099 1100 1101 1102 1103
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
A
Avi Kivity 已提交
1104 1105 1106 1107 1108 1109 1110
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}
A
Avi Kivity 已提交
1111

1112
	op->type = OP_REG;
1113 1114 1115
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

1116
	fetch_register_operand(op);
1117 1118 1119
	op->orig_val = op->val;
}

1120 1121 1122 1123 1124 1125
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

1126
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1127
			struct operand *op)
1128 1129
{
	u8 sib;
B
Bandan Das 已提交
1130
	int index_reg, base_reg, scale;
1131
	int rc = X86EMUL_CONTINUE;
1132
	ulong modrm_ea = 0;
1133

B
Bandan Das 已提交
1134 1135 1136
	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1137

B
Bandan Das 已提交
1138
	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1139
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
B
Bandan Das 已提交
1140
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1141
	ctxt->modrm_seg = VCPU_SREG_DS;
1142

1143
	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1144
		op->type = OP_REG;
1145
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1146
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1147
				ctxt->d & ByteOp);
1148
		if (ctxt->d & Sse) {
A
Avi Kivity 已提交
1149 1150
			op->type = OP_XMM;
			op->bytes = 16;
1151 1152
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
A
Avi Kivity 已提交
1153 1154
			return rc;
		}
A
Avi Kivity 已提交
1155 1156 1157
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
1158
			op->addr.mm = ctxt->modrm_rm & 7;
A
Avi Kivity 已提交
1159 1160
			return rc;
		}
1161
		fetch_register_operand(op);
1162 1163 1164
		return rc;
	}

1165 1166
	op->type = OP_MEM;

1167
	if (ctxt->ad_bytes == 2) {
1168 1169 1170 1171
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1172 1173

		/* 16-bit ModR/M decode. */
1174
		switch (ctxt->modrm_mod) {
1175
		case 0:
1176
			if (ctxt->modrm_rm == 6)
1177
				modrm_ea += insn_fetch(u16, ctxt);
1178 1179
			break;
		case 1:
1180
			modrm_ea += insn_fetch(s8, ctxt);
1181 1182
			break;
		case 2:
1183
			modrm_ea += insn_fetch(u16, ctxt);
1184 1185
			break;
		}
1186
		switch (ctxt->modrm_rm) {
1187
		case 0:
1188
			modrm_ea += bx + si;
1189 1190
			break;
		case 1:
1191
			modrm_ea += bx + di;
1192 1193
			break;
		case 2:
1194
			modrm_ea += bp + si;
1195 1196
			break;
		case 3:
1197
			modrm_ea += bp + di;
1198 1199
			break;
		case 4:
1200
			modrm_ea += si;
1201 1202
			break;
		case 5:
1203
			modrm_ea += di;
1204 1205
			break;
		case 6:
1206
			if (ctxt->modrm_mod != 0)
1207
				modrm_ea += bp;
1208 1209
			break;
		case 7:
1210
			modrm_ea += bx;
1211 1212
			break;
		}
1213 1214 1215
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
1216
		modrm_ea = (u16)modrm_ea;
1217 1218
	} else {
		/* 32/64-bit ModR/M decode. */
1219
		if ((ctxt->modrm_rm & 7) == 4) {
1220
			sib = insn_fetch(u8, ctxt);
1221 1222 1223 1224
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

1225
			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1226
				modrm_ea += insn_fetch(s32, ctxt);
1227
			else {
1228
				modrm_ea += reg_read(ctxt, base_reg);
1229 1230
				adjust_modrm_seg(ctxt, base_reg);
			}
1231
			if (index_reg != 4)
1232
				modrm_ea += reg_read(ctxt, index_reg) << scale;
1233
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1234
			modrm_ea += insn_fetch(s32, ctxt);
1235
			if (ctxt->mode == X86EMUL_MODE_PROT64)
1236
				ctxt->rip_relative = 1;
1237 1238
		} else {
			base_reg = ctxt->modrm_rm;
1239
			modrm_ea += reg_read(ctxt, base_reg);
1240 1241
			adjust_modrm_seg(ctxt, base_reg);
		}
1242
		switch (ctxt->modrm_mod) {
1243
		case 1:
1244
			modrm_ea += insn_fetch(s8, ctxt);
1245 1246
			break;
		case 2:
1247
			modrm_ea += insn_fetch(s32, ctxt);
1248 1249 1250
			break;
		}
	}
1251
	op->addr.mem.ea = modrm_ea;
1252 1253 1254
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

1255 1256 1257 1258 1259
done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
1260
		      struct operand *op)
1261
{
1262
	int rc = X86EMUL_CONTINUE;
1263

1264
	op->type = OP_MEM;
1265
	switch (ctxt->ad_bytes) {
1266
	case 2:
1267
		op->addr.mem.ea = insn_fetch(u16, ctxt);
1268 1269
		break;
	case 4:
1270
		op->addr.mem.ea = insn_fetch(u32, ctxt);
1271 1272
		break;
	case 8:
1273
		op->addr.mem.ea = insn_fetch(u64, ctxt);
1274 1275 1276 1277 1278 1279
		break;
	}
done:
	return rc;
}

1280
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1281
{
1282
	long sv = 0, mask;
1283

1284
	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1285
		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1286

1287 1288 1289 1290
		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
1291 1292
		else
			sv = (s64)ctxt->src.val & (s64)mask;
1293

1294 1295
		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
1296
	}
1297 1298

	/* only subword offset */
1299
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1300 1301
}

1302 1303
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
A
Avi Kivity 已提交
1304
{
1305
	int rc;
1306
	struct read_cache *mc = &ctxt->mem_read;
A
Avi Kivity 已提交
1307

1308 1309
	if (mc->pos < mc->end)
		goto read_cached;
A
Avi Kivity 已提交
1310

1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322
	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
1323 1324
	return X86EMUL_CONTINUE;
}
A
Avi Kivity 已提交
1325

1326 1327 1328 1329 1330
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
1331 1332 1333
	int rc;
	ulong linear;

1334
	rc = linearize(ctxt, addr, size, false, &linear);
1335 1336
	if (rc != X86EMUL_CONTINUE)
		return rc;
1337
	return read_emulated(ctxt, linear, data, size);
1338 1339 1340 1341 1342 1343 1344
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
1345 1346 1347
	int rc;
	ulong linear;

1348
	rc = linearize(ctxt, addr, size, true, &linear);
1349 1350
	if (rc != X86EMUL_CONTINUE)
		return rc;
1351 1352
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
1353 1354 1355 1356 1357 1358 1359
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
1360 1361 1362
	int rc;
	ulong linear;

1363
	rc = linearize(ctxt, addr, size, true, &linear);
1364 1365
	if (rc != X86EMUL_CONTINUE)
		return rc;
1366 1367
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
1368 1369
}

1370 1371 1372 1373
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
1374
	struct read_cache *rc = &ctxt->io_read;
1375

1376 1377
	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
1378
		unsigned int count = ctxt->rep_prefix ?
1379
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1380
		in_page = (ctxt->eflags & EFLG_DF) ?
1381 1382
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1383
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1384 1385 1386
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
1387
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1388 1389
			return 0;
		rc->end = n * size;
A
Avi Kivity 已提交
1390 1391
	}

1392 1393
	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
1394 1395 1396 1397 1398 1399 1400 1401
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
1402 1403
	return 1;
}
A
Avi Kivity 已提交
1404

1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

1421 1422 1423
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
1424
	const struct x86_emulate_ops *ops = ctxt->ops;
1425
	u32 base3 = 0;
1426

1427 1428
	if (selector & 1 << 2) {
		struct desc_struct desc;
1429 1430
		u16 sel;

1431
		memset (dt, 0, sizeof *dt);
1432 1433
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
1434
			return;
1435

1436
		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1437
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1438
	} else
1439
		ops->get_gdt(ctxt, dt);
1440
}
1441

1442 1443
/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1444 1445
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
1446 1447 1448 1449
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;
1450

1451
	get_descriptor_table_ptr(ctxt, selector, &dt);
1452

1453 1454
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
1455

1456
	*desc_addr_p = addr = dt.address + index * 8;
1457 1458
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
1459
}
1460

1461 1462 1463 1464 1465 1466 1467
/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;
A
Avi Kivity 已提交
1468

1469
	get_descriptor_table_ptr(ctxt, selector, &dt);
1470

1471 1472
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
A
Avi Kivity 已提交
1473

1474
	addr = dt.address + index * 8;
1475 1476
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
1477
}
1478

1479
/* Does not support long mode */
1480
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1481
				     u16 selector, int seg, u8 cpl,
1482
				     enum x86_transfer_type transfer,
1483
				     struct desc_struct *desc)
1484
{
1485
	struct desc_struct seg_desc, old_desc;
1486
	u8 dpl, rpl;
1487 1488 1489
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1490
	ulong desc_addr;
1491
	int ret;
1492
	u16 dummy;
1493
	u32 base3 = 0;
1494

1495
	memset(&seg_desc, 0, sizeof seg_desc);
1496

1497 1498 1499
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
1500
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1501 1502
		set_desc_base(&seg_desc, selector << 4);
		goto load;
1503 1504 1505 1506 1507 1508 1509 1510 1511
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
1512 1513
	}

1514 1515 1516 1517 1518 1519 1520
	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
1521 1522 1523 1524 1525 1526 1527 1528 1529 1530
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

1531
	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1532 1533 1534 1535
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
1536 1537
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;
1538

G
Guo Chao 已提交
1539
	/* can't load system descriptor into segment selector */
1540 1541 1542
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
1543
		goto exception;
1544
	}
1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment selector's RPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
A
Avi Kivity 已提交
1561
		break;
1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
1575 1576 1577 1578 1579 1580 1581 1582 1583
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

1584 1585
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
A
Avi Kivity 已提交
1586
		break;
1587 1588 1589
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
1590 1591 1592 1593 1594 1595
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
1596 1597 1598 1599 1600 1601
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
1602
		/*
1603 1604 1605
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
1606
		 */
1607 1608 1609 1610
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
A
Avi Kivity 已提交
1611
		break;
1612 1613 1614 1615 1616
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
1617
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1618 1619
		if (ret != X86EMUL_CONTINUE)
			return ret;
1620 1621 1622 1623 1624
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
1625 1626 1627
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					     ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
1628 1629
	}
load:
1630
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1631 1632
	if (desc)
		*desc = seg_desc;
1633 1634
	return X86EMUL_CONTINUE;
exception:
1635
	return emulate_exception(ctxt, err_vec, err_code, true);
1636 1637
}

1638 1639 1640 1641
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
1642 1643
	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
1644 1645
}

1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664
static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

1665
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1666
{
1667
	switch (op->type) {
1668
	case OP_REG:
1669
		write_register_operand(op);
A
Avi Kivity 已提交
1670
		break;
1671
	case OP_MEM:
1672
		if (ctxt->lock_prefix)
P
Paolo Bonzini 已提交
1673 1674 1675 1676 1677 1678 1679
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
1680 1681 1682
					       op->addr.mem,
					       &op->val,
					       op->bytes);
1683
		break;
1684
	case OP_MEM_STR:
P
Paolo Bonzini 已提交
1685 1686 1687 1688
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
1689
		break;
A
Avi Kivity 已提交
1690
	case OP_XMM:
1691
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
A
Avi Kivity 已提交
1692
		break;
A
Avi Kivity 已提交
1693
	case OP_MM:
1694
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
A
Avi Kivity 已提交
1695
		break;
1696 1697
	case OP_NONE:
		/* no writeback */
1698
		break;
1699
	default:
1700
		break;
A
Avi Kivity 已提交
1701
	}
1702 1703
	return X86EMUL_CONTINUE;
}
A
Avi Kivity 已提交
1704

1705
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1706
{
1707
	struct segmented_address addr;
1708

1709
	rsp_increment(ctxt, -bytes);
1710
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1711 1712
	addr.seg = VCPU_SREG_SS;

1713 1714 1715 1716 1717
	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
1718
	/* Disable writeback. */
1719
	ctxt->dst.type = OP_NONE;
1720
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1721
}
1722

1723 1724 1725 1726
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
1727
	struct segmented_address addr;
1728

1729
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1730
	addr.seg = VCPU_SREG_SS;
1731
	rc = segmented_read(ctxt, addr, dest, len);
1732 1733 1734
	if (rc != X86EMUL_CONTINUE)
		return rc;

1735
	rsp_increment(ctxt, len);
1736
	return rc;
1737 1738
}

1739 1740
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
1741
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1742 1743
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

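/*
 * ENTER: push the old frame pointer, point RBP at the new frame and
 * reserve frame_size bytes of stack.  Only nesting level 0 is emulated;
 * nested frames are reported as unhandleable.
 */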
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

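/*
 * PUSHA stores the value SP had before the first push; POPA skips the
 * saved SP slot instead of loading it, matching hardware behaviour.
 */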
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

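/*
 * Real-mode interrupt dispatch: push FLAGS, CS and IP, clear IF/TF/AC,
 * then fetch the new CS:IP from the 4-byte IVT entry at vector * 4.
 */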
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

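/*
 * Real-mode IRET pops IP, CS and FLAGS in that order.  Only the bits in
 * 'mask' below may change; VM, VIF and VIP are preserved from the old
 * EFLAGS.
 */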
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

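/*
 * CMPXCHG8B: if EDX:EAX equals the 64-bit destination, store ECX:EBX
 * there and set ZF; otherwise load EDX:EAX from the destination and
 * clear ZF.  The 16-byte CMPXCHG16B form is not handled here.
 */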
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit longmode,
	 * so a 64-bit guest running a 32-bit compat app will #UD.  While
	 * that behaviour could be fixed by emulating the AMD response,
	 * AMD CPUs cannot be made to behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

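/*
 * SYSCALL: CS/SS selectors come from MSR_STAR[47:32].  The long-mode
 * path saves RIP in RCX and RFLAGS in R11 and masks flags with
 * MSR_SYSCALL_MASK; legacy mode only clears VM and IF.
 */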
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}

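/*
 * SYSEXIT returns to CPL 3: CS/SS are derived from MSR_IA32_SYSENTER_CS
 * (+16/+24 for a 32-bit return, +32/+40 for a 64-bit one), with the new
 * RIP taken from RDX and the new RSP from RCX.
 */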
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

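/*
 * Even at a bad IOPL an I/O access may be allowed by the TSS I/O
 * permission bitmap: read the bitmap offset at byte 102 of the TSS and
 * test one bit for each port the access covers.
 */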
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

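/*
 * Hardware task-switch emulation: the outgoing context is saved to the
 * old TSS and the incoming context is loaded from the new one, in the
 * 16-bit and 32-bit TSS formats below.
 */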
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}

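/*
 * DAS: decimal-adjust AL after subtraction.  The OR with an immediate 0
 * via fastop() only serves to recompute PF/ZF/SF from the result; CF
 * and AF are then patched in by hand.
 */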
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

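/*
 * AAM splits AL into AH:AL by dividing by the immediate (#DE when it is
 * zero, as on hardware); AAD folds AH back in as AL += AH * imm8 and
 * clears AH.
 */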
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/*
	 * If we failed, we tainted the memory, but at the very least we
	 * should restore cs.
	 */
	if (rc != X86EMUL_CONTINUE)
		goto fail;
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	return rc;

}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

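/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit into DX/EDX/RDX.
 * (val >> (bits - 1)) is 0 or 1, so subtracting 1 and inverting yields
 * all-zeroes or all-ones.
 */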
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    is_noncanonical_address(desc_ptr.address))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

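/*
 * LOOP/LOOPE/LOOPNE (0xe0-0xe2) decrement *CX and branch while it is
 * non-zero; for 0xe0/0xe1 the ZF condition encoded in the opcode must
 * hold as well.  JCXZ branches when the masked *CX is already zero.
 */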
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

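/*
 * MOV to/from CR permission checks.  Reads only require a valid CR
 * number; writes are additionally checked against the per-register
 * reserved-bit masks and the mode-dependent CR0/CR3/CR4 rules below.
 */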
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~15;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

3627 3628 3629 3630
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

3631
	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3632 3633 3634 3635 3636 3637 3638 3639 3640

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
3641
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3642 3643

	/* Valid physical address? */
3644
	if (rax & 0xffff000000000000ULL)
3645 3646 3647 3648 3649
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

3650 3651
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
3652
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3653

3654
	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3655 3656 3657 3658 3659
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

3660 3661
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
3662
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3663
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3664

3665
	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3666
	    ctxt->ops->check_pmc(ctxt, rcx))
3667 3668 3669 3670 3671
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

3672 3673
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
3674 3675
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3676 3677 3678 3679 3680 3681 3682
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
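
/*
 * Illustration (editor's note, not part of the original table macros):
 * an entry written as
 *
 *	F6ALU(Lock, em_add)
 *
 * expands to six opcode slots covering the classic ALU encoding pattern
 * (r/m,r and r,r/m in byte and word/long forms, plus AL/eAX,imm); the
 * first slot becomes
 *
 *	{ .flags = Lock | DstMem | SrcReg | ModRM | ByteOp | Fastop,
 *	  .u.fastop = em_add }
 *
 * so opcodes 0x00-0x05 all funnel into em_add with different operand
 * decode flags.
 */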

static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD,	em_vmcall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | EmulateOnUD,	em_vmmcall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),	/* /6: undocumented alias of SHL */
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),	/* /1: undocumented TEST alias */
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock,		em_inc),
	F(DstMem | SrcNone | Lock,		em_dec),
	I(SrcMem | NearBranch,			em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps | Stack,	em_call_far),
	I(SrcMem | NearBranch,			em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
	I(SrcMem | Stack,			em_push), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot,	sldt),
	DI(Prot,	str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };
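
/*
 * Editor's sketch of how a group_dual entry such as group7 is resolved
 * at decode time (this mirrors the GroupDual case in x86_decode_insn
 * below):
 *
 *	goffset = (ctxt->modrm >> 3) & 7;
 *	if ((ctxt->modrm >> 6) == 3)		// register form
 *		opcode = opcode.u.gdual->mod3[goffset];
 *	else					// memory form
 *		opcode = opcode.u.gdual->mod012[goffset];
 *
 * For 0F 01 (group7) this is how SGDT/LGDT (memory forms) and
 * VMCALL/MONITOR/etc. (register forms, via the RMExt sub-tables) share
 * one opcode.
 */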

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };
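
/*
 * Editor's sketch of Escape table selection (this mirrors the Escape
 * case in x86_decode_insn below): for an FPU escape opcode such as 0xd9,
 *
 *	if (ctxt->modrm > 0xbf)		// register operand form
 *		opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
 *	else				// memory operand form
 *		opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
 *
 * which is why each escape table above has an 8-entry op[] part and a
 * 64-entry high[] part.
 */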

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
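
/*
 * Editor's note: x86_decode_insn() below indexes this table directly
 * with the first opcode byte; e.g. guest byte 0x50 ("push %rax") lands
 * on one of the X8(I(SrcReg | Stack, em_push)) entries, and execution
 * later reaches em_push() through ctxt->execute.
 */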

static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Insns below are indexed by the third opcode byte and then selected by
 * the mandatory (SIMD) prefix, if any.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}
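
/*
 * Editor's note: 64-bit operand size is clamped to a 4-byte fetch above
 * because x86 has (almost) no 64-bit immediates; e.g. REX.W + 05
 * ("add rax, imm32") still encodes a 32-bit immediate that the CPU
 * sign-extends, and only mov (SrcImm64/OpImm64) takes a full imm64.
 */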

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
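
/*
 * Editor's worked example (illustrative): decoding the bytes "66 50" in
 * 32-bit protected mode walks the path above as follows: 0x66 flips
 * op_bytes from 4 to 2 in the legacy-prefix loop, 0x50 is looked up in
 * opcode_table[] as a push, and decode_operand() then fetches the
 * SrcReg operand, so the emulator ends up pushing a 16-bit register.
 */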

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/* The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
	 * corresponding termination condition according to:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}
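
/*
 * Editor's example: for "repe cmpsb" (opcode 0xa6 with the 0xf3 prefix),
 * the check above stops the iteration as soon as a byte comparison
 * clears ZF, in addition to the RCX == 0 termination test made in
 * x86_emulate_insn().
 */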

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
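
/*
 * Editor's note on the fastop convention used above: each fastop target
 * is a table of stubs, one per operand size, spaced FASTOP_SIZE bytes
 * apart, so "fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE" picks the
 * 1/2/4/8-byte variant. Operands arrive in RAX/RDX/CL, flags travel
 * around the call via pushf/popf, and a stub signals a fault (e.g. a
 * divide error) by leaving NULL in the fop register.
 */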

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;


	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}