/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
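
/*
 * Layout sketch of the opcode flags word built from the definitions above
 * (editorial, derived from the shift values in this file):
 *
 *   bit   0       ByteOp
 *   bits  1..5    destination operand type (Dst*)
 *   bits  6..10   source operand type (Src*)
 *   bits 11..30   decode/behaviour flags (BitOp .. NotImpl)
 *   bits 31..35   second source operand type (Src2*)
 *   bits 40..51   u64-only flags (Mmx .. PrivUD)
 */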

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
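
/*
 * Example (illustrative): X4(a) expands to "a, a, a, a"; these repeaters
 * keep the 256-entry opcode tables later in this file compact.
 */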

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;
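
/*
 * Illustrative sketch (editorial assumption, mirroring the fastop()
 * dispatcher declared later in this file): since each size variant is
 * FASTOP_SIZE bytes, the handler for the current operand size is found by
 *
 *	if (!(ctxt->d & ByteOp))
 *		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 *
 * i.e. +8 for the 16-bit, +16 for the 32-bit and +24 for the 64-bit
 * variant, with the byte variant at offset 0.
 */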

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
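
/*
 * Usage note (editorial): reg_read() fills the GPR cache lazily from
 * ->read_gpr(), reg_write()/reg_rmw() mark a register dirty, and
 * writeback_registers() flushes only the dirty ones via ->write_gpr().
 */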

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
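
/*
 * Example (illustrative): a stack_mask() of 0xffff gives stack_size() == 2,
 * 0xffffffff gives 4, and ~0UL (64-bit mode) gives 8.
 */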

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	register_address_increment(ctxt, &ctxt->_eip, rel);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
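
/*
 * Example (illustrative): with desc->g set, a raw limit of 0x000fffff
 * scales to 0xffffffff (4 GiB - 1); with g clear the limit is used as-is.
 */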

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
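
/*
 * Example (illustrative): insn_aligned() reports that a 16-byte MOVDQA
 * operand must be aligned, so __linearize() below raises #GP(0) for a
 * misaligned address, while MOVDQU (Unaligned) and AVX-coded accesses
 * are let through.
 */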

static int __linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write, bool fetch,
		     ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
		    (ctxt->d & NoBigReal)) {
			/* la is between zero and 0xffff */
			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
				goto bad;
		} else if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, sel);
	else
		return emulate_gp(ctxt, sel);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}


static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

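	/*
	 * Note (editorial): cur_size is always < 15 here, so the XOR below
	 * is equivalent to 15 - cur_size, i.e. the bytes still allowed by
	 * the 15-byte maximum instruction length.
	 */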
	size = 15UL ^ cur_size;
	rc = __linearize(ctxt, addr, size, false, true, &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return X86EMUL_UNHANDLEABLE;
	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
		return __do_insn_fetch_bytes(ctxt, size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
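
/*
 * Usage sketch (illustrative): callers do e.g.
 *
 *	ctxt->b = insn_fetch(u8, ctxt);
 *
 * from a function that declares a local "int rc" and a "done:" label,
 * since the macros jump to done on a failed fetch.
 */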

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop indicates a byte operand: without a REX prefix, registers 4..7
 * then decode as the high-byte registers AH, CH, DH and BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
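
/*
 * Example (illustrative): test_cc(0x4, flags) evaluates the "e/z"
 * condition by calling the setz stub above; each stub is aligned to
 * 4 bytes, hence the "+ 4 * (condition & 0xf)" indexing.
 */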

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
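
/*
 * Example (illustrative): for a 32-bit "bt mem, reg" with bit offset 100,
 * sv becomes 96, the effective address advances by 96 >> 3 == 12 bytes,
 * and the remaining in-operand bit index is 100 & 31 == 4.
 */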

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
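
/*
 * Note (editorial): for rep-prefixed IN/INS this reads up to a page worth
 * of data ahead into rc->data and feeds later iterations from that cache,
 * avoiding a host round trip per iteration.
 */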

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset (dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}
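
/*
 * Selector layout reminder (editorial): bits 3..15 index the descriptor
 * table, bit 2 (TI) selects LDT vs GDT, and bits 0..1 hold the RPL; hence
 * the "selector >> 3" indexing and "selector & 0xfffc" error codes.
 */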

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl, bool in_task_switch)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val =  (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

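/*
 * Real-mode interrupt/INT n: push FLAGS, CS and IP, clear IF/TF/AC,
 * then fetch the new CS:IP from the IVT entry at linear address
 * irq * 4 (IP at offset 0, CS at offset 2).
 */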
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

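/*
 * IRET in real mode: pop IP, CS and FLAGS in that order.  A popped IP
 * above 0xffff faults with #GP, and a 32-bit pop keeps the current
 * VM/VIF/VIP bits instead of the popped ones.
 */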
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* IRET from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		ctxt->_eip = ctxt->src.val;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		ctxt->_eip = ctxt->src.val;
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}

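/*
 * CMPXCHG8B: compare EDX:EAX against the 64-bit destination.  On a
 * match ZF is set and ECX:EBX is stored; on a mismatch ZF is cleared
 * and the old value is loaded into EDX:EAX.  The 16-byte CMPXCHG16B
 * form is not handled here.
 */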
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);
		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	return em_pop(ctxt);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long cs;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->op_bytes == 4)
		ctxt->_eip = (u32)ctxt->_eip;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

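/*
 * CMPXCHG: the flag-setting compare of the accumulator with the
 * destination is delegated to fastop(em_cmp); ZF then selects whether
 * the source is written to memory or the old value lands in *AX.
 */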
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in long mode, so only become
	 * vendor specific (via cpuid) if another mode is active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64-bit long
	 * mode.  Also, a 64-bit guest with a running 32-bit compat
	 * application will #UD!  While this behaviour can be fixed (by
	 * emulating the AMD response), AMD CPUs cannot be made to
	 * behave like Intel ones.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

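/*
 * SYSCALL: the new CS selector comes from STAR[47:32] and SS is
 * CS + 8.  In long mode the return RIP is saved in RCX, RFLAGS in
 * R11, and RFLAGS is then masked with MSR_SYSCALL_MASK; legacy mode
 * takes the 32-bit entry point from STAR instead.
 */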
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	}

	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/*
	 * XXX sysenter/sysexit have not been tested in 64-bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}

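/*
 * SYSEXIT: a REX.W prefix selects a 64-bit exit.  The user CS/SS
 * selectors are derived from MSR_IA32_SYSENTER_CS (+16/+24 for the
 * 32-bit case, +32/+40 for the 64-bit case) and the return RIP/RSP
 * are taken from RDX/RCX.
 */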
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
	*reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

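/*
 * Consult the I/O permission bitmap in the TSS: the 16-bit bitmap
 * offset lives at byte 102 of the TSS, and access is allowed only if
 * every bit covering ports [port, port + len) is clear.
 */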
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

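/*
 * Common task-switch path.  Bit 3 of the TSS descriptor type picks the
 * 32-bit vs. 16-bit TSS format; privilege is checked against the task
 * gate or TSS DPL except for IRET and exception/IRQ-initiated
 * switches, and the busy flag and EFLAGS.NT are updated per reason.
 */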
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS: Check against DPL of the TSS
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	} else if (reason != TASK_SWITCH_IRET) {
		int dpl = next_tss_desc.dpl;
		if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
			return emulate_gp(ctxt, tss_selector);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set back link to prev task only if NT bit is set in eflags;
	 * note that old_tss_sel is not used after this point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
}

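/*
 * DAS: decimal adjust AL after subtraction.  The OR of AL with an
 * immediate zero at the end is just a cheap way to let fastop
 * recompute PF, ZF and SF; CF and AF are then patched in by hand.
 */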
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	jmp_rel(ctxt, rel);
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	old_eip = ctxt->_eip;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = old_eip;
	return em_push(ctxt);
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

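/*
 * CWD/CDQ/CQO: replicate the accumulator's sign bit into DX/EDX/RDX.
 * The expression below is all-ones when the sign bit is set (~(1 - 1)
 * == ~0) and zero otherwise (~(0 - 1) == 0).
 */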
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		return X86EMUL_PROPAGATE_FAULT;
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_gdt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

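/*
 * For instance, F6ALU(Lock, em_add) expands to six table entries: the
 * byte and word/long forms of the ModRM r/m,reg and reg,r/m encodings
 * plus the AL/eAX,imm encodings, with Lock dropped where the
 * destination is not memory.
 */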
static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | EmulateOnUD,	em_vmmcall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock,		em_inc),
	F(DstMem | SrcNone | Lock,		em_dec),
	I(SrcMem | Stack,			em_grp45),
	I(SrcMemFAddr | ImplicitOps | Stack,	em_call_far),
	I(SrcMem | Stack,			em_grp45),
	I(SrcMemFAddr | ImplicitOps,		em_grp45),
	I(SrcMem | Stack,			em_grp45), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot,	sldt),
	DI(Prot,	str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	I(SrcNone | Priv | EmulateOnUD,	em_vmcall),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct gprefix pfx_vmovntpx = {
	I(0, em_mov), N, N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

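/*
 * x87 escape opcodes. The first array of each escape table holds the
 * memory forms, indexed by the ModRM reg field; the second holds the
 * register forms 0xc0-0xff, indexed by modrm - 0xc0 (see the Escape
 * case in x86_decode_insn()).
 */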
static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | Stack, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte, em_loop)),
	I(SrcImmByte, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct gprefix three_byte_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N, N, N
};

/*
 * Insns below are indexed by the third opcode byte; within each entry the
 * mandatory prefix (none/66/f2/f3) then selects the actual instruction.
 */
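/*
 * Example: a plain 0f 38 f0 lands on the GP() entry at index 0xf0, and the
 * Prefix pass in x86_decode_insn() then picks the pfx_no slot, i.e.
 * em_movbe; the f2-prefixed form of f0 (crc32) is left as N here.
 */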
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU

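/*
 * imm_size() caps 64-bit operands at a 4-byte immediate: almost all
 * instructions take at most a 32-bit immediate that is then sign-extended,
 * the notable exception being OpImm64 (movabs), which bypasses this helper
 * in decode_operand().
 */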
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

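/*
 * Top-level decoder: gather legacy/REX prefixes, fetch one to three opcode
 * bytes, look the opcode up in the tables above, resolve any
 * group/dual/prefix/escape indirection, then decode the operands described
 * by the accumulated flag word in ctxt->d.
 */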
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 ||
	    (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
		ctxt->d = NotImpl;
	}

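	/*
	 * Worked example: 0f 01 d8 (vmrun). twobyte_table[0x01] is
	 * GD(0, &group7); modrm 0xd8 has mod == 3 and reg == 3, so the
	 * GroupDual case below picks mod3[3] == EXT(0, group7_rm3), and
	 * the RMExt pass then uses rm == 0 to select the vmrun entry.
	 */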
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
		     (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
			ctxt->op_bytes = 8;

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

done:
	if (ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea += ctxt->_eip;

	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/* The second termination condition only applies to REPE
	 * and REPNE. If the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ, check the corresponding
	 * termination condition:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

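/*
 * A fastop routine is really a table of size variants spaced FASTOP_SIZE
 * bytes apart; __ffs(dst.bytes) picks the byte/word/dword/qword entry.
 * The asm constraints below pin the calling convention: dst in rax, src in
 * rdx, src2 in rcx, flags through rdi and the fastop pointer itself in rsi.
 */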
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

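/*
 * This assumes the layout of struct x86_emulate_ctxt: every per-insn decode
 * field is expected to sit between rip_relative and modrm, so one memset
 * wipes the whole span before each instruction.
 */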
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~EFLG_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;


	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= EFLG_RF;
	else
		ctxt->eflags &= ~EFLG_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
				-count);
		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~EFLG_RF;
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}