// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25) /* No Such Instruction */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
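
/*
 * Illustrative expansion: X4(op) becomes "op, op, op, op", which lets an
 * opcode table repeat one entry across several consecutive slots.
 */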

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

struct opcode {
	u64 flags : 56;
	u64 intercept : 8;
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 */
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
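
/*
 * Illustrative sketch, not the dispatcher itself: since every fastop
 * stub is FASTOP_SIZE (8) bytes and the variants are emitted in b/w/l/q
 * order, the entry for a given operand size can be computed rather than
 * looked up, e.g.:
 *
 *	fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
 */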

#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"ret \n\t" \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op,  dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op,  dst, src)	   \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END
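
/*
 * Example (illustrative): FASTOP2(add) emits em_add as four consecutive
 * stubs -- addb %dl,%al / addw %dx,%ax / addl %edx,%eax and, on 64-bit,
 * addq %rdx,%rax -- one per operand size.
 */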

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op,  dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	__FOP_RET(#op)

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
	             "2:\n" \
	             ".pushsection .fixup, \"ax\"\n" \
	             "3: movl $1, %[_fault]\n" \
	             "   jmp  2b\n" \
	             ".popsection\n" \
	             _ASM_EXTABLE(1b, 3b) \
	             : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
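
/*
 * Example: ad_bytes == 2 gives (1UL << 16) - 1 == 0xffff, so 16-bit
 * addressing wraps effective addresses at 64KiB.
 */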

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}
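
/*
 * Example: with desc->g set, a raw limit of 0xfffff scales to
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4GiB segment.
 */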

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}
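
/*
 * Example: a 16-byte MOVDQA operand (Aligned) must be 16-byte aligned,
 * while MOVDQU (Unaligned) may use any address.
 */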

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8  va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})

#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
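
/*
 * Typical use, inside a decode helper that declares "int rc" and a
 * "done:" label (e.g. fetching the SIB byte in decode_modrm below):
 *
 *	sib = insn_fetch(u8, ctxt);
 */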

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}
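
/*
 * The FOP_SETCC stubs above are 4 bytes apart, so indexing em_setcc by
 * 4 * (condition & 0xf) lands on the matching setCC instruction, e.g.
 * condition nibble 0x4 (ZF) selects setz.
 */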

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static void emulator_get_fpu(void)
{
	fpregs_lock();

	fpregs_assert_state_consistent();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		switch_fpu_return();
}

static void emulator_put_fpu(void)
{
	fpregs_unlock();
}

static void read_sse_reg(sse128_t *data, int reg)
{
	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
	default: BUG();
	}
	emulator_put_fpu();
}

static void write_sse_reg(sse128_t *data, int reg)
{
	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
	default: BUG();
	}
	emulator_put_fpu();
}

static void read_mmx_reg(u64 *data, int reg)
{
	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
	emulator_put_fpu();
}

static void write_mmx_reg(u64 *data, int reg)
{
	emulator_get_fpu();
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
	emulator_put_fpu();
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	emulator_get_fpu();
	asm volatile("fninit");
	emulator_put_fpu();
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	emulator_get_fpu();
	asm volatile("fnstcw %0": "+m"(fcw));
	emulator_put_fpu();

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	emulator_get_fpu();
	asm volatile("fnstsw %0": "+m"(fsw));
	emulator_put_fpu();

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(&op->vec_val, reg);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(&op->vec_val, ctxt->modrm_rm);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
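
/*
 * Worked example (illustrative): for a 16-bit "bt [mem], reg" with a
 * bit index of 37, mask is ~15, sv rounds down to 32, the effective
 * address advances by sv >> 3 == 4 bytes, and src.val is reduced to
 * 37 & 15 == 5, the bit offset within the addressed word.
 */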

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}
1470 1471 1472 1473 1474
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
1475 1476 1477
	int rc;
	ulong linear;

1478
	rc = linearize(ctxt, addr, size, false, &linear);
1479 1480
	if (rc != X86EMUL_CONTINUE)
		return rc;
1481
	return read_emulated(ctxt, linear, data, size);
1482 1483 1484 1485 1486 1487 1488
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
1489 1490 1491
	int rc;
	ulong linear;

1492
	rc = linearize(ctxt, addr, size, true, &linear);
1493 1494
	if (rc != X86EMUL_CONTINUE)
		return rc;
1495 1496
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
1497 1498 1499 1500 1501 1502 1503
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
1504 1505 1506
	int rc;
	ulong linear;

1507
	rc = linearize(ctxt, addr, size, true, &linear);
1508 1509
	if (rc != X86EMUL_CONTINUE)
		return rc;
1510 1511
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
1512 1513
}

1514 1515 1516 1517
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
1518
	struct read_cache *rc = &ctxt->io_read;
1519

1520 1521
	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
1522
		unsigned int count = ctxt->rep_prefix ?
1523
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1524
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1525 1526
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1527
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1528 1529 1530
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
1531
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1532 1533
			return 0;
		rc->end = n * size;
A
Avi Kivity 已提交
1534 1535
	}

1536
	if (ctxt->rep_prefix && (ctxt->d & String) &&
1537
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
1538 1539 1540 1541 1542 1543 1544 1545
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
1546 1547
	return 1;
}
A
1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
1561
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1562 1563
}

1564 1565 1566
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
1567
	const struct x86_emulate_ops *ops = ctxt->ops;
1568
	u32 base3 = 0;
1569

1570 1571
	if (selector & 1 << 2) {
		struct desc_struct desc;
1572 1573
		u16 sel;

1574
		memset(dt, 0, sizeof(*dt));
1575 1576
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
1577
			return;
1578

1579
		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1580
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1581
	} else
1582
		ops->get_gdt(ctxt, dt);
1583
}
1584

1585 1586
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
1587 1588 1589 1590
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;
1591

1592
	get_descriptor_table_ptr(ctxt, selector, &dt);
1593

1594 1595
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
1596

1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623
	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

1624
	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1625
}
1626

1627 1628 1629 1630
/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
1631
	int rc;
1632
	ulong addr;
A
Avi Kivity 已提交
1633

1634 1635 1636
	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;
A
Avi Kivity 已提交
1637

1638
	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1639
}
1640

1641
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1642
				     u16 selector, int seg, u8 cpl,
1643
				     enum x86_transfer_type transfer,
1644
				     struct desc_struct *desc)
1645
{
1646
	struct desc_struct seg_desc, old_desc;
1647
	u8 dpl, rpl;
1648 1649 1650
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1651
	ulong desc_addr;
1652
	int ret;
1653
	u16 dummy;
1654
	u32 base3 = 0;
1655

1656
	memset(&seg_desc, 0, sizeof(seg_desc));
1657

1658 1659 1660
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
1661
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1662 1663
		set_desc_base(&seg_desc, selector << 4);
		goto load;
1664 1665 1666 1667 1668 1669 1670 1671 1672
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
1673 1674
	}

1675 1676
	rpl = selector & 3;

1677 1678 1679 1680
	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702
	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
1703
		goto load;
1704
	}
1705

1706
	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1707 1708 1709 1710
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
1711 1712
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;
1713

G
Guo Chao 已提交
1714
	/* can't load system descriptor into segment selector */
1715 1716 1717
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
1718
		goto exception;
1719
	}
1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment selector's RPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
A
Avi Kivity 已提交
1736
		break;
1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
1750 1751 1752 1753 1754 1755 1756 1757 1758
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

1759 1760
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
A
Avi Kivity 已提交
1761
		break;
1762 1763 1764
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
1765 1766 1767 1768 1769 1770
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
1771 1772 1773 1774 1775 1776
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
1777
		/*
1778 1779 1780
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
1781
		 */
1782 1783 1784 1785
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
A
Avi Kivity 已提交
1786
		break;
1787 1788 1789 1790
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
1791 1792 1793 1794 1795 1796 1797
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
1798
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1799
		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1800 1801
		if (ret != X86EMUL_CONTINUE)
			return ret;
1802 1803
		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
				((u64)base3 << 32), ctxt))
1804
			return emulate_gp(ctxt, 0);
1805 1806
	}
load:
1807
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1808 1809
	if (desc)
		*desc = seg_desc;
1810 1811
	return X86EMUL_CONTINUE;
exception:
1812
	return emulate_exception(ctxt, err_vec, err_code, true);
1813 1814
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's wrong).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
		break;
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
		break;
	case OP_XMM:
		write_sse_reg(&op->vec_val, op->addr.xmm);
		break;
	case OP_MM:
		write_mmx_reg(&op->mm_val, op->addr.mm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

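/*
 * Push decrements RSP first and then writes through SS, masking the
 * stack pointer to the current stack address size.
 */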
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

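/*
 * POPF may only change a mode-dependent subset of EFLAGS: IOPL is
 * writable only at CPL 0, and IF only when CPL <= IOPL.
 */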
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		ctxt->src.val = (reg == VCPU_REGS_RSP) ?
				old_esp : reg_read(ctxt, reg);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

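/*
 * Real-mode interrupt dispatch: push FLAGS, CS and IP, then fetch the
 * 4-byte IVT entry for the vector (IP at irq*4, CS at irq*4 + 2).
 */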
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

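/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination; on mismatch
 * load the old value into EDX:EAX and clear ZF, otherwise store
 * ECX:EBX and set ZF.
 */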
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return ctxt->ops->guest_has_long_mode(ctxt);
#else
	return false;
#endif
}

static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
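	/*
	 * @flags uses the layout of the high dword of a segment
	 * descriptor: type in bits 8-11, S bit 12, DPL bits 13-14,
	 * P bit 15, AVL/L/D/G in bits 20-23.
	 */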
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}

static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

#ifdef CONFIG_X86_64
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector =                GET_SMSTATE(u16, smstate, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
	base3 =                   GET_SMSTATE(u32, smstate, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}
#endif

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				    u64 cr0, u64 cr3, u64 cr4)
{
	int bad;
	u64 pcid;

	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
		if (pcid) {
			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}
	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr3, cr4;
	int i;

	cr0 =                      GET_SMSTATE(u32, smstate, 0x7ffc);
	cr3 =                      GET_SMSTATE(u32, smstate, 0x7ff8);
	ctxt->eflags =             GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =               GET_SMSTATE(u32, smstate, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smstate, 0x7fcc);

	if (ctxt->ops->set_dr(ctxt, 6, val))
		return X86EMUL_UNHANDLEABLE;

	val = GET_SMSTATE(u32, smstate, 0x7fc8);

	if (ctxt->ops->set_dr(ctxt, 7, val))
		return X86EMUL_UNHANDLEABLE;

	selector =                 GET_SMSTATE(u32, smstate, 0x7fc4);
	set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f64));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f60));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector =                 GET_SMSTATE(u32, smstate, 0x7fc0);
	set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f80));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f7c));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address =               GET_SMSTATE(u32, smstate, 0x7f74);
	dt.size =                  GET_SMSTATE(u32, smstate, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address =               GET_SMSTATE(u32, smstate, 0x7f58);
	dt.size =                  GET_SMSTATE(u32, smstate, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
}

#ifdef CONFIG_X86_64
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr3, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);

	ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u64, smstate, 0x7f68);

	if (ctxt->ops->set_dr(ctxt, 6, val))
		return X86EMUL_UNHANDLEABLE;

	val = GET_SMSTATE(u64, smstate, 0x7f60);

	if (ctxt->ops->set_dr(ctxt, 7, val))
		return X86EMUL_UNHANDLEABLE;

	cr0 =                       GET_SMSTATE(u64, smstate, 0x7f58);
	cr3 =                       GET_SMSTATE(u64, smstate, 0x7f50);
	cr4 =                       GET_SMSTATE(u64, smstate, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
	val =                       GET_SMSTATE(u64, smstate, 0x7ed0);

	if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
		return X86EMUL_UNHANDLEABLE;

	selector =                  GET_SMSTATE(u32, smstate, 0x7e90);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e92) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e94));
	set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e98));
	base3 =                     GET_SMSTATE(u32, smstate, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size =                   GET_SMSTATE(u32, smstate, 0x7e84);
	dt.address =                GET_SMSTATE(u64, smstate, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector =                  GET_SMSTATE(u32, smstate, 0x7e70);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e72) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e74));
	set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e78));
	base3 =                     GET_SMSTATE(u32, smstate, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size =                   GET_SMSTATE(u32, smstate, 0x7e64);
	dt.address =                GET_SMSTATE(u64, smstate, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}
#endif

static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	char buf[512];
	u64 smbase;
	int ret;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	smbase = ctxt->ops->get_smbase(ctxt);

	ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
	if (ret != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG.  */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PCIDE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);

		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA.  */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	if (emulator_has_longmode(ctxt)) {
		/* Clear CR4.PAE before clearing EFER.LME. */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PAE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

		/* And finally go back to 32-bit mode.  */
		efer = 0;
		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
	}

	/*
	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
	 * state-save area.
	 */
	if (ctxt->ops->pre_leave_smm(ctxt, buf))
		return X86EMUL_UNHANDLEABLE;

#ifdef CONFIG_X86_64
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, buf);
	else
#endif
		ret = rsm_load_state_32(ctxt, buf);

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

	ctxt->ops->post_leave_smm(ctxt);

	return X86EMUL_CONTINUE;
}

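/*
 * SYSCALL/SYSENTER load fixed flat segments; build the 4GB CS and SS
 * descriptors here and let the callers adjust DPL and the L bit.
 */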
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	return is_guest_vendor_intel(ebx, ecx, edx);
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	/*
	 * Intel CPUs only support "syscall" in 64-bit long mode, so a
	 * 32-bit compat app running in a 64-bit guest will #UD. This
	 * could be papered over by emulating the AMD behaviour, but AMD
	 * CPUs can't be made to behave like Intel ones either, so stay
	 * faithful to the guest CPU's vendor.
	 */
	if (is_guest_vendor_intel(ebx, ecx, edx))
		return false;

	if (is_guest_vendor_amd(ebx, ecx, edx) ||
	    is_guest_vendor_hygon(ebx, ecx, edx))
		return true;

	/*
	 * default: (not Intel, not AMD, not Hygon), apply Intel's
	 * stricter rules...
	 */
	return false;
}

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);
	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	return X86EMUL_CONTINUE;
}

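/*
 * SYSENTER loads CS/SS from MSR_IA32_SYSENTER_CS and RIP/RSP from the
 * SYSENTER_EIP/ESP MSRs; it #GPs in real mode.
 */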
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);
	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;
	if (efer & EFER_LMA)
		ctxt->mode = X86EMUL_MODE_PROT64;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (emul_is_noncanonical_address(rcx, ctxt) ||
		    emul_is_noncanonical_address(rdx, ctxt))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

#define VMWARE_PORT_VMPORT	(0x5658)
#define VMWARE_PORT_VMRPC	(0x5659)

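/*
 * Consult the I/O permission bitmap in the TSS: the u16 at offset 102
 * holds the bitmap base, and a port is allowed only if every bit
 * covering [port, port+len) is clear.
 */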
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	/*
	 * VMware allows access to these ports even if denied
	 * by TSS I/O permission bitmap. Mimic behavior.
	 */
	if (enable_vmware_backdoor &&
	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
		return true;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		fallthrough;
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof(tss_seg.prev_task_link));
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);

	return ret;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
				  ldt_sel_offset - eip_offset);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof(tss_seg.prev_task_link));
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr, dr7;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}

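/*
 * DAS adjusts AL after a packed-BCD subtraction: subtract 6 if the low
 * nibble overflowed (AF) and 0x60 if the high nibble did (CF), e.g.
 * 0x25 - 0x19 = 0x0c is corrected to 0x06.
 */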
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

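/*
 * AAM: AH = AL / imm8, AL = AL % imm8 (imm8 is 10 for the plain
 * opcode); a divisor of zero raises #DE.
 */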
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
	enum x86emul_mode prev_mode = ctxt->mode;

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but the very least we should
	   restore cs */
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
		goto fail;
	}
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

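/*
 * CWD/CDQ/CQO: replicate the sign bit of rAX throughout rDX.  Shifting
 * the sign bit down to bit 0 yields 0 or 1; subtracting one and
 * inverting turns that into all-zeroes or all-ones respectively.
 */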
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc_aux = 0;

	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
		return emulate_ud(ctxt);
	ctxt->dst.val = tsc_aux;
	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u16 tmp;

	if (!ctxt->ops->guest_has_movbe(ctxt))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
	u64 msr_data;
	int r;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);

	if (r == X86EMUL_IO_NEEDED)
		return r;

	if (r > 0)
		return emulate_gp(ctxt, 0);

	return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
	u64 msr_data;
	int r;

	r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);

	if (r == X86EMUL_IO_NEEDED)
		return r;

	if (r)
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
{
	if (segment > VCPU_SREG_GS &&
	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	ctxt->dst.val = get_segment_selector(ctxt, segment);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	return em_store_sreg(ctxt, ctxt->modrm_reg);
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_sldt(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_str(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_TR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
				   &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;
	u64 msr = 0;

	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
	    ctxt->ops->cpl(ctxt)) {
		return emulate_gp(ctxt, 0);
	}

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

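/*
 * SAHF: load AH into the low byte of EFLAGS, updating only the five
 * arithmetic flags (CF, PF, AF, ZF, SF) and keeping the fixed bit set.
 */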
static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

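/*
 * BSWAP, emulated with the host instruction.  Only the 32- and 64-bit
 * forms are architecturally defined; a 16-bit BSWAP produces an
 * undefined result and simply takes the 32-bit path here.
 */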
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflushopt regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->ops->guest_has_fxsr(ctxt))
		return emulate_ud(ctxt);

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	/*
	 * Don't emulate a case that should never be hit, instead of working
	 * around a lack of fxsave64/fxrstor64 on old compilers.
	 */
	if (ctxt->mode >= X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	return X86EMUL_CONTINUE;
}

/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{
	bool cr4_osfxsr;
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return __fxstate_size(16);

	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	return __fxstate_size(cr4_osfxsr ? 8 : 0);
}

/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       save and restore
 *  3) 64-bit mode with REX.W prefix
 *     - like (2), but XMM 8-15 are being saved and restored
 *  4) 64-bit mode without REX.W prefix
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	emulator_get_fpu();

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

	emulator_put_fpu();

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
				   fxstate_size(ctxt));
}

/*
 * FXRSTOR might restore XMM registers not provided by the guest. Fill
 * in the host registers (via FXSAVE) instead, so they won't be modified.
 * (preemption has to stay disabled until FXRSTOR).
 *
 * Use noinline to keep the stack for other functions called by callers small.
 */
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{
	struct fxregs_state fx_tmp;
	int rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
	       __fxstate_size(16) - used_size);

	return rc;
}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;
	size_t size;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	size = fxstate_size(ctxt);
	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	emulator_get_fpu();

	if (size < __fxstate_size(16)) {
		rc = fxregs_fixup(&fx_state, size);
		if (rc != X86EMUL_CONTINUE)
			goto out;
	}

	if (fx_state.mxcsr >> 16) {
		rc = emulate_gp(ctxt, 0);
		goto out;
	}

	if (rc == X86EMUL_CONTINUE)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

out:
	emulator_put_fpu();

	return rc;
}

static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	edx = reg_read(ctxt, VCPU_REGS_RDX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);

	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_access(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

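/*
 * DR4/DR5 alias DR6/DR7 only while CR4.DE is clear; with CR4.DE set
 * they raise #UD.  With DR7.GD set, any debug-register access raises
 * #DB with DR6.BD, mirroring the general-detect logic of real CPUs.
 */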
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~DR_TRAP_BITS;
		dr6 |= DR6_BD | DR6_ACTIVE_LOW;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer = 0;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	/*
	 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
	 * in Ring3 when CR4.PCE=0.
	 */
	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
		return X86EMUL_CONTINUE;

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

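/*
 * Decode table constructors: D() declares decode flags only, I() adds an
 * execute callback, F() a fastop callback, and II()/IIP() additionally
 * attach an intercept (plus permission check) for nested virtualization.
 * G()/GD()/E()/GP() redirect decoding through group, group-dual, escape
 * and mandatory-prefix sub-tables.
 */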
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm2[] = {
	N,
	II(ImplicitOps | Priv,			em_xsetbv,	xsetbv),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
};

static const struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static const struct opcode group1[] = {
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
};

static const struct opcode group1A[] = {
	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
};

static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

static const struct opcode group3[] = {
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
};

static const struct opcode group4[] = {
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
	N, N, N, N, N, N,
};

static const struct opcode group5[] = {
	F(DstMem | SrcNone | Lock,		em_inc),
	F(DstMem | SrcNone | Lock,		em_dec),
	I(SrcMem | NearBranch,			em_call_near_abs),
	I(SrcMemFAddr | ImplicitOps,		em_call_far),
	I(SrcMem | NearBranch,			em_jmp_abs),
	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
	I(SrcMem | Stack | TwoMemOp,		em_push), D(Undefined),
};

static const struct opcode group6[] = {
	II(Prot | DstMem,	   em_sldt, sldt),
	II(Prot | DstMem,	   em_str, str),
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
	N, N, N, N,
};

static const struct group_dual group7 = { {
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
}, {
	EXT(0, group7_rm0),
	EXT(0, group7_rm1),
	EXT(0, group7_rm2),
	EXT(0, group7_rm3),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
} };

static const struct opcode group8[] = {
	N, N, N, N,
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
};

/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
 */
static const struct gprefix pfx_0f_c7_7 = {
	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
};


static const struct group_dual group9 = { {
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N,
	GP(0, &pfx_0f_c7_7),
} };

static const struct opcode group11[] = {
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static const struct gprefix pfx_0f_ae_7 = {
	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
};

static const struct group_dual group15 = { {
	I(ModRM | Aligned16, em_fxsave),
	I(ModRM | Aligned16, em_fxrstor),
	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
	N, N, N, N, N, N, N, N,
} };

static const struct gprefix pfx_0f_6f_0f_7f = {
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};

static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

static const struct gprefix pfx_0f_2b = {
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
};

static const struct gprefix pfx_0f_10_0f_11 = {
	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_28_0f_29 = {
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};

static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

static const struct escape escape_d9 = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xD7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};

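/*
 * Main one-byte opcode table, indexed by the first opcode byte; N marks
 * opcodes that are never emulated.
 */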
static const struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	F6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	F6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	F6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	F6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	F6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	F6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	F6ALU(NoWrite, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, MD(ModRM, &mode_dual_63),
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte | NearBranch)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
	/* 0xA8 - 0xAF */
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
	I(DstAcc | SrcImmUByte | No64, em_aam),
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
	I(DstAcc | SrcXLat | ByteOp, em_mov),
	/* 0xD8 - 0xDF */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

static const struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	/* 0x10 - 0x1F */
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
	N, N, N, N, N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
	D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_access),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
	N, N, N, N,
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
	N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm | NearBranch)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xC7 */
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
	N, ID(0, &instr_dual_0f_c3),
	N, N, N, GD(0, &group9),
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};

/*
 * Instructions below are selected by the prefix, which is indexed by the
 * third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};

#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef MD
#undef ID

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU

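/*
 * Immediates are at most four bytes even with a 64-bit operand size;
 * only OpImm64 operands fetch a full eight-byte immediate.
 */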
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

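/*
 * Top-level instruction decode: consume legacy and REX prefixes, walk the
 * one/two/three-byte opcode tables (following group, prefix and escape
 * indirections), then decode ModRM/SIB and up to three operands.
 */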
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	u16 dummy;
	struct desc_struct desc;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	ctxt->intercept = x86_intercept_none;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_ES;
			break;
		case 0x2e:	/* CS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_CS;
			break;
		case 0x36:	/* SS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_SS;
			break;
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_DS;
			break;
		case 0x64:	/* FS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_FS;
			break;
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_GS;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf) {
				size_t size = ARRAY_SIZE(opcode.u.esc->high);
				u32 index = array_index_nospec(
					ctxt->modrm - 0xc0, size);

				opcode = opcode.u.esc->high[index];
			} else {
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			}
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
	    likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

5398
	/* Decode and fetch the destination operand: register or memory. */
5399
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5400

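	/*
	 * RIP-relative displacements are relative to the end of the
	 * instruction; ctxt->_eip points there now that decode is done,
	 * so the effective address can finally be fixed up.
	 */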
	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}

bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition applies only to REPE/REPZ
	 * and REPNE/REPNZ: when one of these prefixes is present, test
	 * the matching condition:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5428
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5429
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5430
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5431 5432 5433 5434 5435
		return true;

	return false;
}

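/*
 * fwait raises any pending x87 exception; running it under asm_safe()
 * converts a faulting fwait into #MF for the guest rather than an
 * unhandled fault in the host.
 */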
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	emulator_get_fpu();
	rc = asm_safe("fwait");
	emulator_put_fpu();

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(&op->mm_val, op->addr.mm);
}

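/*
 * Fastop handlers use a fixed register convention: dst in RAX, src in
 * RDX, src2 in RCX and the live flags in RDI; a handler reports #DE by
 * zeroing the handler pointer carried in RSI.
 */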
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

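/*
 * The memset below clears every field between rip_relative and modrm in
 * one go, which deliberately relies on their ordering within
 * struct x86_emulate_ctxt.
 */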
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(&ctxt->src);
			fetch_possible_mmx_operand(&ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(&ctxt->dst);
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

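	/* An ImplicitOps destination encodes no operand to pre-read. */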
	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

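	/*
	 * Keep X86_EFLAGS_RF set across string iterations so a restarted
	 * instruction does not re-trigger an instruction breakpoint.
	 */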
	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop)
			rc = fastop(ctxt, ctxt->fop);
		else
			rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:              /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * Restore dst type in case the decoding is reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after every 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		ctxt->eip = (u32)ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

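	/*
	 * Opcodes reaching the labels below have no ->execute callback;
	 * the remaining 0x0f-prefixed cases are handled inline.
	 */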
twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
	case 0x1f:		/* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:     /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
							(s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}

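/*
 * A cached guest physical address can only be reused when the
 * instruction touches a single memory location: string instructions
 * advance their address each iteration and TwoMemOp instructions
 * access two distinct locations.
 */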
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}