/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
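
/*
 * For illustration only (this is not itself a table entry): a
 * hypothetical opcode that reads memory and writes a register could be
 * described as
 *
 *	DstReg | SrcMem | ModRM | Mov
 *
 * which packs OpReg into bits 1-5 and OpMem into bits 6-10 of the flags
 * word and sets the single-bit ModRM and Mov flags; the decoder later
 * recovers each operand type with, e.g., (flags >> DstShift) & OpMask.
 */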

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
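
/*
 * The emulator reads and writes guest GPRs through a small cache:
 * reg_read() pulls a register into _regs[] on first use and marks it
 * valid, reg_write()/reg_rmw() additionally mark it dirty, and
 * writeback_registers() flushes only the dirty entries back through
 * ->write_gpr().
 */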

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END
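
/*
 * For illustration, FASTOP1(not) emits an assembly blob roughly
 * equivalent to
 *
 *	.align 8; em_not:	notb %al;  ret
 *	.align 8;		notw %ax;  ret
 *	.align 8;		notl %eax; ret
 *	.align 8;		notq %rax; ret		(64-bit only)
 *
 * so the byte/word/long/quad variants sit at em_not + 0, + 8, + 16 and
 * + 24, and the variant for an n-byte operand can be reached by adding
 * a multiple of FASTOP_SIZE to the symbol's address instead of going
 * through a jump table.
 */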

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
366 367 368 369
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
370 371 372
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
373 374
	FOP_END

375 376 377 378 379 380 381 382 383
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
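
/*
 * Each stub above is at most four bytes ("setcc %al" plus "ret") and is
 * padded to a 4-byte boundary, and the sixteen stubs follow the x86
 * condition-code encoding 0x0 (seto) through 0xf (setnle), so test_cc()
 * below can jump straight to em_setcc + 4 * cc with no lookup table.
 */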

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}
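
/*
 * Example: with a 16-bit address size (ctxt->ad_bytes == 2), ad_mask()
 * is 0xffff, so stale high bits in e.g. RSI are ignored and an
 * effective address of 0x12345678 is masked down to 0x5678, matching
 * the wrap-around of a real 16-bit SI.
 */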

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
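
/*
 * Example: a descriptor with g=1 and the maximum 20-bit limit 0xfffff
 * scales to (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4 GiB segment.
 */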

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
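
/*
 * Example: a 16-byte MOVDQA operand (Aligned) at linear address 0x1008
 * fails the (la & (size - 1)) test in __linearize() below and raises
 * #GP(0), while MOVDQU (Unaligned) at the same address is permitted.
 */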

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		if (is_noncanonical_address(la))
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		} else {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim)
				goto bad;
			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
		}
		if (size > *max_size)
			goto bad;
		break;
	}
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}
688 689 690 691 692
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
693
	unsigned max_size;
694 695
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
696 697
}

698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			mode = X86EMUL_MODE_PROT64;
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
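
/*
 * Typical use inside the decoder, where both macros rely on a local
 * 'rc' and a 'done' label being in scope to bail out on fetch failure:
 *
 *	sib = insn_fetch(u8, ctxt);
 *	modrm_ea += insn_fetch(s32, ctxt);
 */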

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop specifies whether the operand is a byte operand; byte operands
 * without a REX prefix address AH, CH, DH or BH for modrm_reg 4..7.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
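
/*
 * Worked example: for "bt %ecx, m32" with ECX == 100, mask is ~31 and
 * sv == 96, so the memory operand's effective address is advanced by
 * 96 >> 3 == 12 bytes and src.val is reduced to 100 & 31 == 4, the bit
 * offset inside the addressed dword.
 */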

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}
1321 1322 1323 1324 1325
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
1326 1327 1328
	int rc;
	ulong linear;

1329
	rc = linearize(ctxt, addr, size, false, &linear);
1330 1331
	if (rc != X86EMUL_CONTINUE)
		return rc;
1332
	return read_emulated(ctxt, linear, data, size);
1333 1334 1335 1336 1337 1338 1339
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
1340 1341 1342
	int rc;
	ulong linear;

1343
	rc = linearize(ctxt, addr, size, true, &linear);
1344 1345
	if (rc != X86EMUL_CONTINUE)
		return rc;
1346 1347
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
1348 1349 1350 1351 1352 1353 1354
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
1355 1356 1357
	int rc;
	ulong linear;

1358
	rc = linearize(ctxt, addr, size, true, &linear);
1359 1360
	if (rc != X86EMUL_CONTINUE)
		return rc;
1361 1362
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
1363 1364
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
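
/*
 * Example of the read-ahead: for "rep insb" with RCX == 300 and RDI 100
 * bytes below a page boundary (EFLAGS.DF clear), n is min3(100,
 * sizeof(rc->data), 300), i.e. 100 assuming the cache is large enough,
 * so a single ->pio_in_emulated() call fetches 100 bytes and the
 * forward-string fast path hands the whole cached run to writeback as
 * one OP_MEM_STR operand.
 */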

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}
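
/*
 * Example: selector 0x002f has index 5 (0x2f >> 3), table-indicator 1
 * (so the LDT is used) and RPL 3.  The 8-byte descriptor is read from
 * dt.address + 5 * 8, and a table limit below index * 8 + 7 == 47
 * yields #GP with error code 0x2c (the selector with RPL cleared).
 */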

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (is_noncanonical_address(get_desc_base(&seg_desc) |
					     ((u64)base3 << 32)))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
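
/*
 * Example: a POPF at CPL 3 with IOPL 0 in protected mode leaves both
 * EFLG_IOPL and EFLG_IF out of change_mask, so those bits are silently
 * preserved rather than faulting; the same POPF in VM86 mode with
 * IOPL < 3 raises #GP(0) instead.
 */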

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

A
Avi Kivity 已提交
1804 1805
static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
				old_esp : reg_read(ctxt, reg);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

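/*
 * Real-mode interrupt dispatch: push FLAGS, CS and IP, clear IF/TF/AC,
 * then fetch the new CS:IP from the IVT entry at IDT base + irq * 4.
 */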
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected-mode interrupts are not implemented yet. */
		return X86EMUL_UNHANDLEABLE;
	}
}

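/*
 * Real-mode IRET pops IP, CS and FLAGS in that order; only the low
 * 16 bits of the popped EIP are valid here, hence the #GP check.
 */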
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* IRET from protected mode is not implemented yet. */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

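/*
 * CMPXCHG8B: on a match ZF is set and ECX:EBX is written back to
 * memory, otherwise ZF is cleared and the old value lands in EDX:EAX.
 * The 16-byte CMPXCHG16B form is not handled here.
 */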
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

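/*
 * SYSCALL/SYSENTER segments are architecturally fixed: flat 4GB code
 * and stack descriptors whose DPL and L bits are patched up by the
 * callers.
 */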
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel"): Intel CPUs only support "syscall" in
	 * 64-bit long mode, so a 32-bit compat application in a 64-bit
	 * guest will #UD. While this behaviour could be papered over by
	 * emulating the AMD response, AMD CPUs cannot behave like Intel
	 * either, so keep the Intel behaviour for Intel vendor IDs.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

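/*
 * SYSCALL: CS/SS selectors come from MSR_STAR[47:32]; the entry point
 * is MSR_LSTAR (64-bit) or MSR_CSTAR (compat) with EFLAGS masked by
 * MSR_SYSCALL_MASK, while legacy mode jumps to the EIP in MSR_STAR
 * and only clears VM and IF.
 */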
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!em_syscall_is_enabled(ctxt))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}

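/*
 * IOPL check: I/O is unrestricted in real mode, always subject to the
 * TSS bitmap in VM86 mode, and otherwise allowed directly when
 * CPL <= IOPL.
 */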
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

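/*
 * Snapshot the current register and segment state into the in-memory
 * image of a 16-bit (80286-style) TSS.
 */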
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage,
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is set
	 * in eflags; note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

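/*
 * Advance a string-op index register (SI/DI) by count * size bytes,
 * moving backwards when EFLAGS.DF is set.
 */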
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
}

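/*
 * DAS: decimal-adjust AL after subtraction. Subtract 6 if the low
 * nibble is not valid BCD (or AF was set) and 0x60 if the high nibble
 * overflowed (or CF was set), updating AF and CF to match.
 */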
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

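/*
 * Far CALL: load the new CS first, then push the old CS:IP; if a push
 * faults, restore the old CS so the guest sees a consistent state.
 */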
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/*
	 * If we failed, we tainted the memory, but at the very least we
	 * should restore cs.
	 */
	if (rc != X86EMUL_CONTINUE)
		goto fail;
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules, so we have to do the operation almost by hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

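/*
 * SGDT/SIDT store a 2-byte limit followed by the table base; with a
 * 16-bit operand size the base is truncated to 24 bits.
 */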
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    is_noncanonical_address(desc_ptr.address))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

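/*
 * Debug-register access checks: DR4/DR5 are aliases of DR6/DR7 only
 * while CR4.DE is clear, and any access with DR7.GD set raises #DB
 * with DR6.BD.
 */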
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~15;
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

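/*
 * Shorthand for opcode table entries: D is decode-only flags, I adds
 * an execute callback, F a fastop, DI/II/DIP/IIP add intercepts and
 * permission checks, G/GD/E/GP indirect through group, group-dual,
 * escape and mandatory-prefix tables, and the *2bv/F6ALU helpers
 * expand to the byte-op/word-op pairs the one-byte map repeats.
 */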
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD,	em_vmcall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | EmulateOnUD,	em_vmmcall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};

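/* Group 2 (rotates/shifts); /6 is the undocumented SHL alias. */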
static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock,		em_inc),
	F(DstMem | SrcNone | Lock,		em_dec),
	I(SrcMem | NearBranch,			em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps | Stack,	em_call_far),
	I(SrcMem | NearBranch,			em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
	I(SrcMem | Stack,			em_push), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot,	sldt),
	DI(Prot,	str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

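/* Group 7 decodes differently for memory and register (mod == 3) forms. */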
static const struct group_dual group7 = { {
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), N, N, N,
};

static const struct group_dual group15 = { {
	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

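/* gprefix entries are picked by mandatory prefix: none, 66, F2 or F3. */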
static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct gprefix pfx_0f_2b = {
	I(0, em_mov), I(0, em_mov), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

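/*
 * x87 escape opcodes 0xd8 - 0xdf: .op is indexed by /reg for memory
 * forms, .high by (modrm - 0xc0) for register forms.
 */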
static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

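/* One-byte opcode map, indexed by the opcode byte itself. */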
static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

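/* Two-byte (0x0f xx) opcode map. */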
static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct gprefix three_byte_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N, N, N
};

/*
 * Insns below are selected by the mandatory prefix, after the table
 * itself is indexed by the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU

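/*
 * Immediate size for OpImm/OpImmU: capped at 4 bytes, since 64-bit
 * operand size takes a sign-extended imm32 (OpImm64 is the exception).
 */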
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

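/*
 * Materialize one operand, described by an OpXXX value pulled from the
 * Src/Src2/Dst fields of ctxt->d, into a struct operand.
 */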
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

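/*
 * Decode one instruction at ctxt->eip: legacy and REX prefixes, one to
 * three opcode bytes, group/prefix/escape indirection, ModRM/SIB, and
 * finally the src, src2 and dst operands.
 */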
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies to REPE
	 * and REPNE: if the repeat prefix is REPE/REPZ or REPNE/REPNZ,
	 * test the corresponding termination condition:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

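/*
 * fwait surfaces any pending x87 exception now, through the fixup
 * table, so MMX operands are only fetched from a fault-free FPU.
 */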
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

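/*
 * fastop handlers are tiny asm stubs laid out FASTOP_SIZE apart, one
 * per operand size; operands travel in registers and the arithmetic
 * flags are exchanged around the call.
 */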
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

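/*
 * Execute a previously decoded instruction: run privilege, mode and
 * intercept checks, read memory operands, dispatch to the handler or
 * the switch below, then write back results and advance RIP.
 */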
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;


	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
				-count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc. */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}