/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
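
/*
 * Illustrative sketch (not part of the original file): operand types are
 * packed into the opcode flags field in OpBits-wide slots, so decoding a
 * type is a shift-and-mask, e.g.:
 *
 *	u64 flags = DstReg | SrcMem;
 *	unsigned dsttype = (flags & DstMask) >> DstShift;	// == OpReg
 *	unsigned srctype = (flags & SrcMask) >> SrcShift;	// == OpMem
 */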
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;
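
/*
 * Sketch of how a dispatcher can exploit this layout (an assumption drawn
 * from the comment above; the real fastop() dispatcher lives elsewhere in
 * this file): because every size variant is FASTOP_SIZE bytes, the entry
 * for operand size 'bytes' sits at a computable offset from the group base:
 *
 *	void (*fop)(struct fastop *);
 *	fop = (void *)((ulong)em_add + ilog2(bytes) * FASTOP_SIZE);
 *					// bytes 1/2/4/8 -> b/w/l/q variant
 */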

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
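
/*
 * Usage sketch: emulation code reaches guest GPRs only through this lazy
 * cache, e.g.
 *
 *	ulong sp = reg_read(ctxt, VCPU_REGS_RSP);	// fill + read
 *	*reg_rmw(ctxt, VCPU_REGS_RSP) = sp - 8;		// read-modify-write
 *
 * Dirty entries are flushed back to the vcpu with writeback_registers()
 * once an instruction completes successfully.
 */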

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    FOP_ALIGN \
	    "em_" #op ": \n\t"

#define FOP_END \
	    ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op,  dst) \
	FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
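
/*
 * Illustrative expansion (sketch): FASTOP2(add) emits four entry points,
 * each aligned to FASTOP_SIZE (8) bytes, under the single symbol em_add:
 *
 *	em_add + 0:	addb %dl, %al;   ret
 *	em_add + 8:	addw %dx, %ax;   ret
 *	em_add + 16:	addl %edx, %eax; ret
 *	em_add + 24:	addq %rdx, %rax; ret	(64-bit kernels only)
 *
 * which is what lets the dispatcher pick a size variant by offset
 * arithmetic instead of a jump table.
 */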

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	return address_mask(ctxt, reg);
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
	ulong mask;

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ~0UL;
	else
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	register_address_increment(ctxt, &ctxt->_eip, rel);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	if (likely(size < 16))
		return false;

	if (ctxt->d & Aligned)
		return true;
	else if (ctxt->d & Unaligned)
		return false;
	else if (ctxt->d & Avx)
		return false;
	else
		return true;
}
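
/*
 * Example: a 16-byte MOVDQA access is flagged Aligned, so __linearize()
 * below raises #GP(0) whenever (la & 15) != 0, while MOVDQU (Unaligned)
 * skips the alignment check entirely.
 */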

static int __linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write, bool fetch,
		     ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	unsigned cpl;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		break;
	default:
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			/* expand-up segment */
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		} else {
			/* expand-down segment */
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
				goto bad;
		}
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
			/* data segment */
			if (cpl > desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && !(desc.type & 4)) {
			/* nonconforming code segment */
			if (cpl != desc.dpl)
				goto bad;
		} else if ((desc.type & 8) && (desc.type & 4)) {
			/* conforming code segment */
			if (cpl < desc.dpl)
				goto bad;
		}
		break;
	}
	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
		la &= (u32)-1;
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
	*linear = la;
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, sel);
	else
		return emulate_gp(ctxt, sel);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	return __linearize(ctxt, addr, size, write, false, linear);
}


static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/* 15UL ^ cur_size == 15 - cur_size for cur_size in [0, 15] */
	size = 15UL ^ cur_size;
	rc = __linearize(ctxt, addr, size, false, true, &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return X86EMUL_UNHANDLEABLE;
	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
		return __do_insn_fetch_bytes(ctxt, size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	_x = *(_type __aligned(1) *) ctxt->fetch.ptr;			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
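
/*
 * Usage sketch (assumes a local 'int rc' and a 'done:' label in the
 * caller, exactly as the decode routines below provide):
 *
 *	ctxt->src.val = insn_fetch(s8, ctxt);	// sign-extended imm8
 *	modrm_ea += insn_fetch(s32, ctxt);	// 32-bit displacement
 */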

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; call *%[fastop]"
	    : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
	return rc;
}
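
/*
 * FOP_SETCC() aligns each stub to 4 bytes, so the stub for condition code
 * 'cc' sits at em_setcc + 4 * cc in declaration order (seto, setno, setc,
 * ...); e.g. test_cc(2, flags) ends up calling the setc stub.
 */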

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	ctxt->ops->get_fpu(ctxt);
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fninit");
	ctxt->ops->put_fpu(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstcw %0": "+m"(fcw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	ctxt->ops->get_fpu(ctxt);
	asm volatile("fnstsw %0": "+m"(fsw));
	ctxt->ops->put_fpu(ctxt);

	/* force 2 byte destination */
	ctxt->dst.bytes = 2;
	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 5)
				modrm_ea += insn_fetch(s32, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}
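
/*
 * Worked example (16-bit decode above): for modrm byte 0x46 followed by
 * displacement 0x10, mod == 1 and rm == 6, so modrm_ea = (u16)(bp + 0x10)
 * and, because rm == 6 with nonzero mod, the default segment becomes SS.
 */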

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea += (sv >> 3);
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
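
/*
 * Worked example: for a 32-bit "bt %eax, mem" with bit index 100, mask is
 * ~31, so sv = 96 and the effective address above moves by sv >> 3 == 12
 * bytes; the in-word bit offset left in src.val is 100 & 31 == 4.
 */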

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & EFLG_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
			count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & EFLG_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}
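
/*
 * Example of the read-ahead: "rep insw" with RCX == 100 and RDI well below
 * the next page boundary batches min(in_page / 2, sizeof(rc->data) / 2, 100)
 * port reads into rc->data with a single ->pio_in_emulated() call; later
 * iterations of the string loop are then served from the cache.
 */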

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof *dt);
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
				   &ctxt->exception);
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
				    &ctxt->exception);
}

/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl, bool in_task_switch)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof seg_desc);

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
	if ((seg == VCPU_SREG_CS
	     || (seg == VCPU_SREG_SS
		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
	     || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (in_task_switch && rpl != dpl)
			goto exception;

		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
				sizeof(base3), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
	return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val;
		break;	/* 64b: zero-extend */
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}
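
/*
 * Example: a 4-byte write such as "mov $1, %eax" stores (u32)op->val
 * through the full-width *op->addr.reg, clearing bits 63:32 just like
 * hardware's 64-bit zero-extension rule, while 1- and 2-byte writes leave
 * the upper bytes untouched.
 */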

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}
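
/*
 * Example: in protected mode at CPL 3 with IOPL 0, neither EFLG_IOPL nor
 * EFLG_IF joins change_mask, so a POPF only updates the arithmetic flags
 * plus TF/DF/NT/RF/AC/ID; IF and IOPL silently keep their old values.
 */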

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}
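
/*
 * The real-mode IVT is an array of 4-byte entries, a 16-bit IP followed
 * by a 16-bit CS, based at the IDT address.  Worked example: for irq 8,
 * eip_addr = dt.address + 32 and cs_addr = dt.address + 34.
 */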

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}
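
/*
 * Note the asymmetry above: a 32-bit IRET rewrites every flag in 'mask'
 * while preserving the VM86 bits (VM/VIF/VIP), whereas a 16-bit IRET
 * only replaces the low word of EFLAGS.
 */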

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		ctxt->_eip = ctxt->src.val;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		ctxt->_eip = ctxt->src.val;
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}
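
/*
 * CMPXCHG8B compares EDX:EAX with the 64-bit destination: on a match it
 * stores ECX:EBX and sets ZF, otherwise it loads the old value into
 * EDX:EAX and clears ZF.  The 16-byte CMPXCHG16B form is rejected above
 * as unhandled.
 */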

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	return em_pop(ctxt);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long cs;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->op_bytes == 4)
		ctxt->_eip = (u32)ctxt->_eip;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}
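
/*
 * Both descriptors built above describe flat 4GB segments (base 0,
 * limit 0xfffff with page granularity), the fixed layout that
 * SYSCALL/SYSENTER and their return paths assume; only the selectors,
 * DPL and the L/D bits vary per instruction.
 */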

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64bit longmode,
	 * so a 64bit guest running a 32bit compat-app will #UD !!
	 * While this behaviour can be fixed (by emulating the AMD
	 * response), AMD CPUs can't behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}
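
/*
 * Worked example for the vendor checks above: CPUID leaf 0 returns the
 * vendor string in EBX, EDX, ECX -- for "GenuineIntel" that is "Genu"
 * in EBX, "ineI" in EDX and "ntel" in ECX, which is exactly what the
 * X86EMUL_CPUID_VENDOR_GenuineIntel_* constants encode.
 */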

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/*
	 * XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	default:
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
	*reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}
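
/*
 * Worked example: for port 0x3f8 and len 1, the relevant permission bit
 * is bit 0 of the byte at io_bitmap_ptr + 0x7f within the TSS; access
 * is allowed only if every bit covered by 'mask' reads as zero.
 */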

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
			     ldt_sel_offset - eip_offset, &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS: Check against DPL of the TSS
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	} else if (reason != TASK_SWITCH_IRET) {
		int dpl = next_tss_desc.dpl;
		if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
			return emulate_gp(ctxt, tss_selector);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
}
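
/*
 * With EFLG_DF clear, string operations advance by op->bytes per
 * iteration; with EFLG_DF set they move backwards.  E.g. a repeated
 * 4-byte access with op->count == 2 and DF == 0 moves the index
 * register forward by 8.
 */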

static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}
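
/*
 * Worked example: DAS with AL == 0x9c and AF == CF == 0 first subtracts
 * 6 (low nibble 0xc > 9), then 0x60 (original AL > 0x99), leaving
 * AL == 0x36 with CF and AF set -- the packed-BCD adjustment after a
 * subtraction.
 */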

static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}
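
/*
 * Worked example: AAD with the default base 10 and AH:AL == 02:05 folds
 * the unpacked BCD digits into AL = 2 * 10 + 5 = 25 (0x19) and clears
 * AH.
 */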

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	jmp_rel(ctxt, rel);
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;

	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	old_eip = ctxt->_eip;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
		return X86EMUL_CONTINUE;

	ctxt->_eip = 0;
	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = old_eip;
	return em_push(ctxt);
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	ctxt->dst.bytes = ctxt->op_bytes;
	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		return X86EMUL_PROPAGATE_FAULT;
	}
	return X86EMUL_CONTINUE;
}
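
/*
 * Worked example: a 4-byte movbe of 0x12345678 stores 0x78563412, while
 * the 2-byte form swaps only the low word and, per the SDM quote above,
 * leaves the upper word of the destination register unchanged.
 */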

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, ctxt->dst.addr.mem,
			       &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_gdt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		jmp_rel(ctxt, ctxt->src.val);

	return X86EMUL_CONTINUE;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
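
/*
 * Reading the tables below: e.g. F6ALU(Lock, em_add) expands to the six
 * classic ALU encodings of ADD -- the r/m,reg and reg,r/m forms in byte
 * and full-width variants plus the AL/eAX,imm forms -- all dispatched
 * through the same fastop routine.
 */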

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | EmulateOnUD,	em_vmmcall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock,		em_inc),
	F(DstMem | SrcNone | Lock,		em_dec),
	I(SrcMem | Stack,			em_grp45),
	I(SrcMemFAddr | ImplicitOps | Stack,	em_call_far),
	I(SrcMem | Stack,			em_grp45),
	I(SrcMemFAddr | ImplicitOps,		em_grp45),
	I(SrcMem | Stack,			em_grp45), D(Undefined),
};

static const struct opcode group6[] = {
	DI(Prot,	sldt),
	DI(Prot,	str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	I(SrcNone | Priv | EmulateOnUD,	em_vmcall),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct gprefix pfx_vmovntpx = {
	I(0, em_mov), N, N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

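/*
 * x87 escape opcodes (0xd8 - 0xdf): the first array covers the memory
 * forms, indexed by the ModRM reg field; the second covers the register
 * forms 0xc0 - 0xff.  Only fnstcw, fninit and fnstsw are emulated here.
 */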
static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

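/*
 * Primary (one-byte) opcode map.  Opcodes marked N are not implemented;
 * decoding them fails rather than guessing at semantics.
 */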
static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | Stack, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte, em_loop)),
	I(SrcImmByte, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

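/*
 * Two-byte (0x0f-prefixed) opcode map.  Group, group-dual, gprefix and
 * escape entries are resolved by the GroupMask loop in x86_decode_insn().
 */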
static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N,
	D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

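/*
 * 0x0f 0x38 0xf0/0xf1: only the prefix-less MOVBE forms are emulated;
 * the 0x66-prefixed (16-bit MOVBE) and 0xf2-prefixed (hardware CRC32)
 * encodings are left as N.
 */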
static const struct gprefix three_byte_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N, N, N
};

/*
 * The instructions below are indexed by the third opcode byte and,
 * within each entry, selected by the mandatory prefix.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef F2bv
#undef I2bvIP
#undef F6ALU

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

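/*
 * Decode a single operand.  'd' is a 5-bit Op* specifier that the
 * caller has already extracted from ctxt->d with OpMask.
 */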
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

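/*
 * Decode one instruction: legacy prefixes, REX, the opcode byte(s),
 * group/prefix/escape indirection, ModRM/SIB, and finally the source,
 * second-source and destination operands.  On success, ctxt holds the
 * decoded operands and the execute/fastop handler for the opcode.
 */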
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 ||
	    (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(ctxt->d &
		     (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
		/*
		 * These are copied unconditionally here, and checked
		 * unconditionally in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
			ctxt->op_bytes = 8;

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

done:
	if (ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea += ctxt->_eip;

	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition applies only to REPE/REPZ
	 * and REPNE/REPNZ.  If the repeat string operation prefix is
	 * one of these, test the corresponding ZF-based condition:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}

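/*
 * Poke the FPU with fwait so that any pending x87 exception is raised
 * here, where it can be reported as an emulated #MF, rather than on a
 * later host FPU access.  The exception fixup sets 'fault' if the
 * fwait traps.
 */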
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	bool fault = false;

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     "2: \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "3: \n\t"
		     "movb $1, %[fault] \n\t"
		     "jmp 2b \n\t"
		     ".popsection \n\t"
		     _ASM_EXTABLE(1b, 3b)
		     : [fault]"+qm"(fault));
	ctxt->ops->put_fpu(ctxt);

	if (unlikely(fault))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

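/*
 * Invoke a flag-producing fastop stub.  For non-byte ops, the stub for
 * the operand size lives FASTOP_SIZE bytes further on, indexed by log2
 * of the destination width.  RFLAGS is marshalled in and out around the
 * call; a stub signals a divide error by returning a NULL fop, which is
 * turned into an injected #DE.
 */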
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
	asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [fastop]"+S"(fop)
	    : "c"(ctxt->src2.val));
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

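/*
 * Clear the per-instruction decode state in one memset; this relies on
 * the fields from rip_relative up to (but not including) modrm being
 * laid out contiguously in struct x86_emulate_ctxt.
 */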
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

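/*
 * Execute a previously decoded instruction.  Validates the LOCK prefix,
 * privilege level, mode and intercepts, reads the memory operands,
 * dispatches to the execute/fastop handler (or to the inline switches
 * for the remaining opcodes), then writes back results and advances RIP.
 */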
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the FPU is exception-safe, we can
			 * fetch operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				ctxt->eip = ctxt->_eip;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	ctxt->dst.orig_val = ctxt->dst.val;

special_insn:

	if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->execute) {
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
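	/*
	 * Instructions flagged SrcWrite (xadd in the tables above) must
	 * write back their source operand as well as their destination.
	 */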
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding is reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
				-count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the PIO read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
			 ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
							(u32) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}