emulate.c 151.1 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
A
Avi Kivity 已提交
2
/******************************************************************************
3
 * emulate.c
A
Avi Kivity 已提交
4 5 6 7 8 9
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
10
 * privileged instructions:
A
Avi Kivity 已提交
11 12
 *
 * Copyright (C) 2006 Qumranet
N
Nicolas Kaiser 已提交
13
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
A
Avi Kivity 已提交
14 15 16 17 18 19 20
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

21
#include <linux/kvm_host.h>
22
#include "kvm_cache_regs.h"
23
#include <asm/kvm_emulate.h>
24
#include <linux/stringify.h>
25
#include <asm/debugreg.h>
26
#include <asm/nospec-branch.h>
A
Avi Kivity 已提交
27

28
#include "x86.h"
29
#include "tss.h"
30
#include "mmu.h"
31
#include "pmu.h"
32

33 34 35
/*
 * Operand types
 */
36 37 38 39 40 41 42 43 44
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
45 46 47
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
48
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
49 50 51 52 53 54 55
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
56 57 58 59 60 61
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
62
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
63
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
P
Paolo Bonzini 已提交
64
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
65 66
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */
67 68

#define OpBits             5  /* Width of operand field */
69
#define OpMask             ((1ull << OpBits) - 1)
70

A
Avi Kivity 已提交
71 72 73 74 75 76 77 78 79 80
/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
81
#define ByteOp      (1<<0)	/* 8-bit operands. */
A
Avi Kivity 已提交
82
/* Destination operand type. */
83 84 85 86 87 88 89
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
90
#define DstMem16    (OpMem16 << DstShift)
91 92
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
93
#define DstAccLo    (OpAccLo << DstShift)
94
#define DstMask     (OpMask << DstShift)
A
Avi Kivity 已提交
95
/* Source operand type. */
96 97 98 99 100 101 102 103 104 105 106 107
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
P
Paolo Bonzini 已提交
108
#define SrcXLat     (OpXLat << SrcShift)
109 110 111 112
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
113
#define SrcImm64    (OpImm64 << SrcShift)
114
#define SrcDX       (OpDX << SrcShift)
115
#define SrcMem8     (OpMem8 << SrcShift)
116
#define SrcAccHi    (OpAccHi << SrcShift)
117
#define SrcMask     (OpMask << SrcShift)
118 119 120 121 122 123 124 125 126
#define BitOp       (1<<11)
#define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
127
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
128
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
129
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
130
#define Sse         (1<<18)     /* SSE Vector instruction */
131 132 133 134
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
135
/* Misc flags */
136
#define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
137
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
138
#define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
139
#define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
140
#define Undefined   (1<<25) /* No Such Instruction */
141
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
142
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
143
#define No64	    (1<<28)
144
#define PageTable   (1 << 29)   /* instruction used to write page table */
145
#define NotImpl     (1 << 30)   /* instruction is not implemented */
146
/* Source 2 operand type */
147
#define Src2Shift   (31)
148
#define Src2None    (OpNone << Src2Shift)
149
#define Src2Mem     (OpMem << Src2Shift)
150 151 152 153
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
154 155 156 157 158 159
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
160
#define Src2Mask    (OpMask << Src2Shift)
A
Avi Kivity 已提交
161
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
162
#define AlignMask   ((u64)7 << 41)
163
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
164 165 166
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
167
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
168
#define NoWrite     ((u64)1 << 45)  /* No writeback */
169
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
170
#define NoMod	    ((u64)1 << 47)  /* Mod field is ignored */
171 172
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
173
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
174
#define NearBranch  ((u64)1 << 52)  /* Near branches */
175
#define No16	    ((u64)1 << 53)  /* No 16 bit operand */
176
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
177
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operand */
A
Avi Kivity 已提交
178

179
#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
A
Avi Kivity 已提交
180

181 182 183 184 185 186 187 188
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
189

190 191 192 193 194 195
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
196 197
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
198 199
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
200
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
201 202 203 204 205 206 207 208 209 210 211
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;

212
struct opcode {
213 214
	u64 flags : 56;
	u64 intercept : 8;
215
	union {
216
		int (*execute)(struct x86_emulate_ctxt *ctxt);
217 218 219
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
220
		const struct escape *esc;
221
		const struct instr_dual *idual;
222
		const struct mode_dual *mdual;
223
		void (*fastop)(struct fastop *fake);
224
	} u;
225
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
226 227 228 229 230
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
231 232
};

233 234 235 236 237 238 239
struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

240 241 242 243 244
struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

245 246 247 248 249
struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

250 251 252 253 254
struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

255 256
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

257 258 259 260 261 262 263
enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

A
Avi Kivity 已提交
300 301 302 303
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
304 305
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)
A
Avi Kivity 已提交
306

307 308 309 310 311 312
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

313 314
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

315
#define __FOP_FUNC(name) \
316 317 318 319
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

320 321 322 323 324 325 326 327 328
#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"ret \n\t" \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)
329 330 331 332 333

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
334 335
	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
	    "em_" #op ":\n\t"
336 337 338 339

#define FOP_END \
	    ".popsection")

340 341 342 343
#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

344
#define FOPNOP() \
345
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
346

347
#define FOP1E(op,  dst) \
348 349 350
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)
351 352 353

#define FOP1EEX(op,  dst) \
	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
354 355 356 357 358 359 360 361 362

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax))	\
	FOP_END

363 364 365 366 367 368 369 370 371
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

372 373 374 375 376 377 378 379 380
/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

381
#define FOP2E(op,  dst, src)	   \
382 383 384
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)
385 386 387

#define FASTOP2(op) \
	FOP_START(op) \
388 389 390 391
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
392 393
	FOP_END

394 395 396 397
/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
398 399 400
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
401 402
	FOP_END

403 404 405 406 407 408 409 410 411
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

412 413 414 415 416 417 418 419 420
/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

421
#define FOP3E(op,  dst, src, src2) \
422 423 424
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)
425 426 427 428 429

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
430 431 432
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
433 434
	FOP_END

435
/* Special case for SETcc - 1 instruction per cc */
436 437 438 439 440
#define FOP_SETCC(op) \
	".align 4 \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
441
	__FOP_RET(#op)
442

443 444 445 446
asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");
447

448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

467 468 469 470
FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
P
Paolo Bonzini 已提交
471 472
FOP_END;

R
Radim Krčmář 已提交
473 474
/*
 * XXX: inoutclob user must know where the argument is being expanded.
475
 *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
R
Radim Krčmář 已提交
476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
	             "2:\n" \
	             ".pushsection .fixup, \"ax\"\n" \
	             "3: movl $1, %[_fault]\n" \
	             "   jmp  2b\n" \
	             ".popsection\n" \
	             _ASM_EXTABLE(1b, 3b) \
	             : [_fault] "+qm"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

493 494 495 496 497 498
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
499 500 501 502 503
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
504
		.dst_val    = ctxt->dst.val64,
505 506 507
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
508 509 510
		.next_rip   = ctxt->eip,
	};

511
	return ctxt->ops->intercept(ctxt, &info, stage);
512 513
}

A
Avi Kivity 已提交
514 515 516 517 518
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537
static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

538
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
539
{
540
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
541 542
}

A
Avi Kivity 已提交
543 544 545 546 547 548 549 550 551 552 553
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

A
Avi Kivity 已提交
554 555 556 557 558
static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

A
Avi Kivity 已提交
559
/* Access/update address held in a register, based on addressing mode. */
560
static inline unsigned long
561
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
562
{
563
	if (ctxt->ad_bytes == sizeof(unsigned long))
564 565
		return reg;
	else
566
		return reg & ad_mask(ctxt);
567 568 569
}

static inline unsigned long
570
register_address(struct x86_emulate_ctxt *ctxt, int reg)
571
{
572
	return address_mask(ctxt, reg_read(ctxt, reg));
573 574
}

575 576 577 578 579
static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

580
static inline void
581
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
582
{
583
	ulong *preg = reg_rmw(ctxt, reg);
584

585
	assign_register(preg, *preg + inc, ctxt->ad_bytes);
586 587 588 589
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
590
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
591
}
A
Avi Kivity 已提交
592

593 594 595 596 597 598 599
static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

600
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
601 602 603 604
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

605
	return ctxt->ops->get_cached_segment_base(ctxt, seg);
606 607
}

608 609
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
610
{
611
	WARN_ON(vec > 0x1f);
612 613 614
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
615
	return X86EMUL_PROPAGATE_FAULT;
616 617
}

618 619 620 621 622
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

623
static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
624
{
625
	return emulate_exception(ctxt, GP_VECTOR, err, true);
626 627
}

628 629 630 631 632
static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

633
static int emulate_ud(struct x86_emulate_ctxt *ctxt)
634
{
635
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
636 637
}

638
static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
639
{
640
	return emulate_exception(ctxt, TS_VECTOR, err, true);
641 642
}

643 644
static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
645
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
646 647
}

A
Avi Kivity 已提交
648 649 650 651 652
static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

673 674 675 676 677 678
/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
679 680
 * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
681
 */
682
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
683
{
684
	u64 alignment = ctxt->d & AlignMask;
685 686

	if (likely(size < 16))
687
		return 1;
688

689 690 691
	switch (alignment) {
	case Unaligned:
	case Avx:
692
		return 1;
693
	case Aligned16:
694
		return 16;
695 696
	case Aligned:
	default:
697
		return size;
698
	}
699 700
}

701 702 703 704
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
705
				       enum x86emul_mode mode, ulong *linear)
706
{
707 708
	struct desc_struct desc;
	bool usable;
709
	ulong la;
710
	u32 lim;
711
	u16 sel;
712
	u8  va_bits;
713

714
	la = seg_base(ctxt, addr.seg) + addr.ea;
715
	*max_size = 0;
716
	switch (mode) {
717
	case X86EMUL_MODE_PROT64:
718
		*linear = la;
719 720
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
721
			goto bad;
722

723
		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
724 725
		if (size > *max_size)
			goto bad;
726 727
		break;
	default:
728
		*linear = la = (u32)la;
729 730
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
731 732
		if (!usable)
			goto bad;
733 734 735
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
736 737
			goto bad;
		/* unreadable code segment */
738
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
739 740
			goto bad;
		lim = desc_limit_scaled(&desc);
741
		if (!(desc.type & 8) && (desc.type & 4)) {
G
Guo Chao 已提交
742
			/* expand-down segment */
743
			if (addr.ea <= lim)
744 745 746
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
747 748
		if (addr.ea > lim)
			goto bad;
749 750 751 752 753 754 755
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
756 757
		break;
	}
758
	if (la & (insn_alignment(ctxt, size) - 1))
759
		return emulate_gp(ctxt, 0);
760
	return X86EMUL_CONTINUE;
761 762
bad:
	if (addr.seg == VCPU_SREG_SS)
763
		return emulate_ss(ctxt, 0);
764
	else
765
		return emulate_gp(ctxt, 0);
766 767
}

768 769 770 771 772
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
773
	unsigned max_size;
774 775
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
776 777
}

778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					   .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
798 799
}

800 801 802 803
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
804
	int rc;
805 806

#ifdef CONFIG_X86_64
807 808 809
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;
810

811 812 813 814 815
			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
816 817 818 819
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
820 821 822 823
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
824 825 826 827 828 829
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}
830

831 832 833
static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
834
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
835 836 837 838 839 840
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
841
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
842 843
}

844 845 846 847 848
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
849 850 851
	int rc;
	ulong linear;

852
	rc = linearize(ctxt, addr, size, false, &linear);
853 854
	if (rc != X86EMUL_CONTINUE)
		return rc;
855
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
856 857
}

858 859 860 861 862 863 864 865 866 867 868
static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
869
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
870 871
}

872
/*
873
 * Prefetch the remaining bytes of the instruction without crossing page
874 875
 * boundary if they are not in fetch_cache yet.
 */
876
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
877 878
{
	int rc;
879
	unsigned size, max_size;
880
	unsigned long linear;
881
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
882
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
883 884
					   .ea = ctxt->eip + cur_size };

885 886 887 888 889 890 891 892 893 894
	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
895 896
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
897 898 899
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

900
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
901
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
902 903 904 905 906 907 908 909

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
910 911
		return emulate_gp(ctxt, 0);

912
	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
913 914 915
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
916
	ctxt->fetch.end += size;
917
	return X86EMUL_CONTINUE;
918 919
}

920 921
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
922
{
923 924 925 926
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
927 928
	else
		return X86EMUL_CONTINUE;
929 930
}

931
/* Fetch next part of the instruction being emulated. */
932
#define insn_fetch(_type, _ctxt)					\
933 934 935
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
936 937
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
938
	ctxt->_eip += sizeof(_type);					\
939
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
940
	ctxt->fetch.ptr += sizeof(_type);				\
941
	_x;								\
942 943
})

944
#define insn_fetch_arr(_arr, _size, _ctxt)				\
945 946
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
947 948
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
949
	ctxt->_eip += (_size);						\
950 951
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
952 953
})

954 955 956 957 958
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
959
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
960
			     int byteop)
A
Avi Kivity 已提交
961 962
{
	void *p;
963
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
A
Avi Kivity 已提交
964 965

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
966 967 968
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
A
Avi Kivity 已提交
969 970 971 972
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
973
			   struct segmented_address addr,
A
Avi Kivity 已提交
974 975 976 977 978 979 980
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
981
	rc = segmented_read_std(ctxt, addr, size, 2);
982
	if (rc != X86EMUL_CONTINUE)
A
Avi Kivity 已提交
983
		return rc;
984
	addr.ea += 2;
985
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
A
Avi Kivity 已提交
986 987 988
	return rc;
}

989 990 991 992 993 994 995 996 997 998
FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

999 1000
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
1001 1002
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
1003

1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028
FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

1029 1030
FASTOP2(xadd);

1031 1032
FASTOP2R(cmp, cmp_r);

1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048
static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

1049
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1050
{
1051 1052
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
1053

1054
	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1055 1056
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1057
	return rc;
1058 1059
}

1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077
static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

A
Avi Kivity 已提交
1078 1079 1080
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
	switch (reg) {
1081 1082 1083 1084 1085 1086 1087 1088
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
A
Avi Kivity 已提交
1089
#ifdef CONFIG_X86_64
1090 1091 1092 1093 1094 1095 1096 1097
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
A
Avi Kivity 已提交
1098 1099 1100 1101 1102 1103 1104 1105 1106
#endif
	default: BUG();
	}
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
			  int reg)
{
	switch (reg) {
1107 1108 1109 1110 1111 1112 1113 1114
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
A
Avi Kivity 已提交
1115
#ifdef CONFIG_X86_64
1116 1117 1118 1119 1120 1121 1122 1123
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
A
Avi Kivity 已提交
1124 1125 1126 1127 1128
#endif
	default: BUG();
	}
}

A
Avi Kivity 已提交
1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	default: BUG();
	}
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
	switch (reg) {
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	default: BUG();
	}
}

1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fninit");
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstcw %0": "+m"(fcw));

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstsw %0": "+m"(fsw));

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

A
Avi Kivity 已提交
1196
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1197
				    struct operand *op)
1198
{
1199
	unsigned reg = ctxt->modrm_reg;
1200

1201 1202
	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
A
Avi Kivity 已提交
1203

1204
	if (ctxt->d & Sse) {
A
Avi Kivity 已提交
1205 1206 1207 1208 1209 1210
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		read_sse_reg(ctxt, &op->vec_val, reg);
		return;
	}
A
Avi Kivity 已提交
1211 1212 1213 1214 1215 1216 1217
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}
A
Avi Kivity 已提交
1218

1219
	op->type = OP_REG;
1220 1221 1222
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

1223
	fetch_register_operand(op);
1224 1225 1226
	op->orig_val = op->val;
}

1227 1228 1229 1230 1231 1232
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

1233
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1234
			struct operand *op)
1235 1236
{
	u8 sib;
B
Bandan Das 已提交
1237
	int index_reg, base_reg, scale;
1238
	int rc = X86EMUL_CONTINUE;
1239
	ulong modrm_ea = 0;
1240

B
Bandan Das 已提交
1241 1242 1243
	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1244

B
Bandan Das 已提交
1245
	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1246
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
B
Bandan Das 已提交
1247
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1248
	ctxt->modrm_seg = VCPU_SREG_DS;
1249

1250
	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1251
		op->type = OP_REG;
1252
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1253
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1254
				ctxt->d & ByteOp);
1255
		if (ctxt->d & Sse) {
A
Avi Kivity 已提交
1256 1257
			op->type = OP_XMM;
			op->bytes = 16;
1258 1259
			op->addr.xmm = ctxt->modrm_rm;
			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
A
Avi Kivity 已提交
1260 1261
			return rc;
		}
A
Avi Kivity 已提交
1262 1263 1264
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
1265
			op->addr.mm = ctxt->modrm_rm & 7;
A
Avi Kivity 已提交
1266 1267
			return rc;
		}
1268
		fetch_register_operand(op);
1269 1270 1271
		return rc;
	}

1272 1273
	op->type = OP_MEM;

1274
	if (ctxt->ad_bytes == 2) {
1275 1276 1277 1278
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1279 1280

		/* 16-bit ModR/M decode. */
1281
		switch (ctxt->modrm_mod) {
1282
		case 0:
1283
			if (ctxt->modrm_rm == 6)
1284
				modrm_ea += insn_fetch(u16, ctxt);
1285 1286
			break;
		case 1:
1287
			modrm_ea += insn_fetch(s8, ctxt);
1288 1289
			break;
		case 2:
1290
			modrm_ea += insn_fetch(u16, ctxt);
1291 1292
			break;
		}
1293
		switch (ctxt->modrm_rm) {
1294
		case 0:
1295
			modrm_ea += bx + si;
1296 1297
			break;
		case 1:
1298
			modrm_ea += bx + di;
1299 1300
			break;
		case 2:
1301
			modrm_ea += bp + si;
1302 1303
			break;
		case 3:
1304
			modrm_ea += bp + di;
1305 1306
			break;
		case 4:
1307
			modrm_ea += si;
1308 1309
			break;
		case 5:
1310
			modrm_ea += di;
1311 1312
			break;
		case 6:
1313
			if (ctxt->modrm_mod != 0)
1314
				modrm_ea += bp;
1315 1316
			break;
		case 7:
1317
			modrm_ea += bx;
1318 1319
			break;
		}
1320 1321 1322
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
1323
		modrm_ea = (u16)modrm_ea;
1324 1325
	} else {
		/* 32/64-bit ModR/M decode. */
1326
		if ((ctxt->modrm_rm & 7) == 4) {
1327
			sib = insn_fetch(u8, ctxt);
1328 1329 1330 1331
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

1332
			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1333
				modrm_ea += insn_fetch(s32, ctxt);
1334
			else {
1335
				modrm_ea += reg_read(ctxt, base_reg);
1336
				adjust_modrm_seg(ctxt, base_reg);
1337 1338 1339 1340
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
1341
			}
1342
			if (index_reg != 4)
1343
				modrm_ea += reg_read(ctxt, index_reg) << scale;
1344
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1345
			modrm_ea += insn_fetch(s32, ctxt);
1346
			if (ctxt->mode == X86EMUL_MODE_PROT64)
1347
				ctxt->rip_relative = 1;
1348 1349
		} else {
			base_reg = ctxt->modrm_rm;
1350
			modrm_ea += reg_read(ctxt, base_reg);
1351 1352
			adjust_modrm_seg(ctxt, base_reg);
		}
1353
		switch (ctxt->modrm_mod) {
1354
		case 1:
1355
			modrm_ea += insn_fetch(s8, ctxt);
1356 1357
			break;
		case 2:
1358
			modrm_ea += insn_fetch(s32, ctxt);
1359 1360 1361
			break;
		}
	}
1362
	op->addr.mem.ea = modrm_ea;
1363 1364 1365
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

1366 1367 1368 1369 1370
done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
1371
		      struct operand *op)
1372
{
1373
	int rc = X86EMUL_CONTINUE;
1374

1375
	op->type = OP_MEM;
1376
	switch (ctxt->ad_bytes) {
1377
	case 2:
1378
		op->addr.mem.ea = insn_fetch(u16, ctxt);
1379 1380
		break;
	case 4:
1381
		op->addr.mem.ea = insn_fetch(u32, ctxt);
1382 1383
		break;
	case 8:
1384
		op->addr.mem.ea = insn_fetch(u64, ctxt);
1385 1386 1387 1388 1389 1390
		break;
	}
done:
	return rc;
}

1391
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1392
{
1393
	long sv = 0, mask;
1394

1395
	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1396
		mask = ~((long)ctxt->dst.bytes * 8 - 1);
1397

1398 1399 1400 1401
		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
1402 1403
		else
			sv = (s64)ctxt->src.val & (s64)mask;
1404

1405 1406
		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					   ctxt->dst.addr.mem.ea + (sv >> 3));
1407
	}
1408 1409

	/* only subword offset */
1410
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1411 1412
}

1413 1414
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
A
Avi Kivity 已提交
1415
{
1416
	int rc;
1417
	struct read_cache *mc = &ctxt->mem_read;
A
Avi Kivity 已提交
1418

1419 1420
	if (mc->pos < mc->end)
		goto read_cached;
A
Avi Kivity 已提交
1421

1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433
	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
1434 1435
	return X86EMUL_CONTINUE;
}
A
Avi Kivity 已提交
1436

1437 1438 1439 1440 1441
static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
1442 1443 1444
	int rc;
	ulong linear;

1445
	rc = linearize(ctxt, addr, size, false, &linear);
1446 1447
	if (rc != X86EMUL_CONTINUE)
		return rc;
1448
	return read_emulated(ctxt, linear, data, size);
1449 1450 1451 1452 1453 1454 1455
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
1456 1457 1458
	int rc;
	ulong linear;

1459
	rc = linearize(ctxt, addr, size, true, &linear);
1460 1461
	if (rc != X86EMUL_CONTINUE)
		return rc;
1462 1463
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
1464 1465 1466 1467 1468 1469 1470
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
1471 1472 1473
	int rc;
	ulong linear;

1474
	rc = linearize(ctxt, addr, size, true, &linear);
1475 1476
	if (rc != X86EMUL_CONTINUE)
		return rc;
1477 1478
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
1479 1480
}

1481 1482 1483 1484
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
1485
	struct read_cache *rc = &ctxt->io_read;
1486

1487 1488
	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
1489
		unsigned int count = ctxt->rep_prefix ?
1490
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1491
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1492 1493
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1494
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1495 1496 1497
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
1498
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1499 1500
			return 0;
		rc->end = n * size;
A
Avi Kivity 已提交
1501 1502
	}

1503
	if (ctxt->rep_prefix && (ctxt->d & String) &&
1504
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
1505 1506 1507 1508 1509 1510 1511 1512
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
1513 1514
	return 1;
}
A
Avi Kivity 已提交
1515

1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
1528
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1529 1530
}

1531 1532 1533
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
1534
	const struct x86_emulate_ops *ops = ctxt->ops;
1535
	u32 base3 = 0;
1536

1537 1538
	if (selector & 1 << 2) {
		struct desc_struct desc;
1539 1540
		u16 sel;

1541
		memset(dt, 0, sizeof(*dt));
1542 1543
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
1544
			return;
1545

1546
		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1547
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1548
	} else
1549
		ops->get_gdt(ctxt, dt);
1550
}
1551

1552 1553
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
1554 1555 1556 1557
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;
1558

1559
	get_descriptor_table_ptr(ctxt, selector, &dt);
1560

1561 1562
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
1563

1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590
	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

1591
	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1592
}
1593

1594 1595 1596 1597
/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
1598
	int rc;
1599
	ulong addr;
A
Avi Kivity 已提交
1600

1601 1602 1603
	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;
A
Avi Kivity 已提交
1604

1605
	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1606
}
1607

1608
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1609
				     u16 selector, int seg, u8 cpl,
1610
				     enum x86_transfer_type transfer,
1611
				     struct desc_struct *desc)
1612
{
1613
	struct desc_struct seg_desc, old_desc;
1614
	u8 dpl, rpl;
1615 1616 1617
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1618
	ulong desc_addr;
1619
	int ret;
1620
	u16 dummy;
1621
	u32 base3 = 0;
1622

1623
	memset(&seg_desc, 0, sizeof(seg_desc));
1624

1625 1626 1627
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
1628
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1629 1630
		set_desc_base(&seg_desc, selector << 4);
		goto load;
1631 1632 1633 1634 1635 1636 1637 1638 1639
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
1640 1641
	}

1642 1643
	rpl = selector & 3;

1644 1645 1646 1647
	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669
	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
1670
		goto load;
1671
	}
1672

1673
	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1674 1675 1676 1677
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
1678 1679
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;
1680

G
Guo Chao 已提交
1681
	/* can't load system descriptor into segment selector */
1682 1683 1684
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
1685
		goto exception;
1686
	}
1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment selector's RPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
A
Avi Kivity 已提交
1703
		break;
1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
1717 1718 1719 1720 1721 1722 1723 1724 1725
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

1726 1727
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
A
Avi Kivity 已提交
1728
		break;
1729 1730 1731
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
1732 1733 1734 1735 1736 1737
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
1738 1739 1740 1741 1742 1743
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /*  DS, ES, FS, or GS */
1744
		/*
1745 1746 1747
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
1748
		 */
1749 1750 1751 1752
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
A
Avi Kivity 已提交
1753
		break;
1754 1755 1756 1757
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
1758 1759 1760 1761 1762 1763 1764
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
1765
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1766
		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1767 1768
		if (ret != X86EMUL_CONTINUE)
			return ret;
1769 1770
		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
				((u64)base3 << 32), ctxt))
1771
			return emulate_gp(ctxt, 0);
1772 1773
	}
load:
1774
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1775 1776
	if (desc)
		*desc = seg_desc;
1777 1778
	return X86EMUL_CONTINUE;
exception:
1779
	return emulate_exception(ctxt, err_vec, err_code, true);
1780 1781
}

1782 1783 1784 1785
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);
1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's wrong).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

1801 1802
	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
1803 1804
}

1805 1806
static void write_register_operand(struct operand *op)
{
1807
	return assign_register(op->addr.reg, op->val, op->bytes);
1808 1809
}

1810
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1811
{
1812
	switch (op->type) {
1813
	case OP_REG:
1814
		write_register_operand(op);
A
Avi Kivity 已提交
1815
		break;
1816
	case OP_MEM:
1817
		if (ctxt->lock_prefix)
P
Paolo Bonzini 已提交
1818 1819 1820 1821 1822 1823 1824
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
1825 1826 1827
					       op->addr.mem,
					       &op->val,
					       op->bytes);
1828
		break;
1829
	case OP_MEM_STR:
P
Paolo Bonzini 已提交
1830 1831 1832 1833
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
1834
		break;
A
Avi Kivity 已提交
1835
	case OP_XMM:
1836
		write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
A
Avi Kivity 已提交
1837
		break;
A
Avi Kivity 已提交
1838
	case OP_MM:
1839
		write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
A
Avi Kivity 已提交
1840
		break;
1841 1842
	case OP_NONE:
		/* no writeback */
1843
		break;
1844
	default:
1845
		break;
A
Avi Kivity 已提交
1846
	}
1847 1848
	return X86EMUL_CONTINUE;
}
A
Avi Kivity 已提交
1849

1850
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1851
{
1852
	struct segmented_address addr;
1853

1854
	rsp_increment(ctxt, -bytes);
1855
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1856 1857
	addr.seg = VCPU_SREG_SS;

1858 1859 1860 1861 1862
	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
1863
	/* Disable writeback. */
1864
	ctxt->dst.type = OP_NONE;
1865
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1866
}
1867

1868 1869 1870 1871
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
1872
	struct segmented_address addr;
1873

1874
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1875
	addr.seg = VCPU_SREG_SS;
1876
	rc = segmented_read(ctxt, addr, dest, len);
1877 1878 1879
	if (rc != X86EMUL_CONTINUE)
		return rc;

1880
	rsp_increment(ctxt, len);
1881
	return rc;
1882 1883
}

1884 1885
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
1886
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1887 1888
}

1889
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1890
			void *dest, int len)
1891 1892
{
	int rc;
1893
	unsigned long val, change_mask;
1894
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1895
	int cpl = ctxt->ops->cpl(ctxt);
1896

1897
	rc = emulate_pop(ctxt, &val, len);
1898 1899
	if (rc != X86EMUL_CONTINUE)
		return rc;
1900

1901 1902 1903 1904
	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;
1905

1906 1907 1908 1909 1910
	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
1911
			change_mask |= X86_EFLAGS_IOPL;
1912
		if (cpl <= iopl)
1913
			change_mask |= X86_EFLAGS_IF;
1914 1915
		break;
	case X86EMUL_MODE_VM86:
1916 1917
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
1918
		change_mask |= X86_EFLAGS_IF;
1919 1920
		break;
	default: /* real mode */
1921
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1922
		break;
1923
	}
1924 1925 1926 1927 1928

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
1929 1930
}

1931 1932
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
1933 1934 1935 1936
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1937 1938
}

A
Avi Kivity 已提交
1939 1940 1941 1942 1943
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
1944
	ulong rbp;
A
Avi Kivity 已提交
1945 1946 1947 1948

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

1949 1950
	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
A
Avi Kivity 已提交
1951 1952
	if (rc != X86EMUL_CONTINUE)
		return rc;
1953
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
A
Avi Kivity 已提交
1954
		      stack_mask(ctxt));
1955 1956
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
A
Avi Kivity 已提交
1957 1958 1959 1960
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

A
Avi Kivity 已提交
1961 1962
static int em_leave(struct x86_emulate_ctxt *ctxt)
{
1963
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
A
Avi Kivity 已提交
1964
		      stack_mask(ctxt));
1965
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
A
Avi Kivity 已提交
1966 1967
}

1968
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1969
{
1970 1971
	int seg = ctxt->src2.val;

1972
	ctxt->src.val = get_segment_selector(ctxt, seg);
1973 1974 1975 1976
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}
1977

1978
	return em_push(ctxt);
1979 1980
}

1981
static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1982
{
1983
	int seg = ctxt->src2.val;
1984 1985
	unsigned long selector;
	int rc;
1986

1987
	rc = emulate_pop(ctxt, &selector, 2);
1988 1989 1990
	if (rc != X86EMUL_CONTINUE)
		return rc;

1991 1992
	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1993 1994
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);
1995

1996
	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1997
	return rc;
1998 1999
}

2000
static int em_pusha(struct x86_emulate_ctxt *ctxt)
2001
{
2002
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
2003 2004
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;
2005

2006 2007
	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
2008
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
2009

2010
		rc = em_push(ctxt);
2011 2012
		if (rc != X86EMUL_CONTINUE)
			return rc;
2013

2014
		++reg;
2015 2016
	}

2017
	return rc;
2018 2019
}

2020 2021
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
2022
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
2023 2024 2025
	return em_push(ctxt);
}

2026
static int em_popa(struct x86_emulate_ctxt *ctxt)
2027
{
2028 2029
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
2030
	u32 val;
2031

2032 2033
	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
2034
			rsp_increment(ctxt, ctxt->op_bytes);
2035 2036
			--reg;
		}
2037

2038
		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2039 2040
		if (rc != X86EMUL_CONTINUE)
			break;
2041
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2042
		--reg;
2043
	}
2044
	return rc;
2045 2046
}

2047
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2048
{
2049
	const struct x86_emulate_ops *ops = ctxt->ops;
2050
	int rc;
2051 2052 2053 2054 2055 2056
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
2057
	ctxt->src.val = ctxt->eflags;
2058
	rc = em_push(ctxt);
2059 2060
	if (rc != X86EMUL_CONTINUE)
		return rc;
2061

2062
	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2063

2064
	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2065
	rc = em_push(ctxt);
2066 2067
	if (rc != X86EMUL_CONTINUE)
		return rc;
2068

2069
	ctxt->src.val = ctxt->_eip;
2070
	rc = em_push(ctxt);
2071 2072 2073
	if (rc != X86EMUL_CONTINUE)
		return rc;

2074
	ops->get_idt(ctxt, &dt);
2075 2076 2077 2078

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

2079
	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2080 2081 2082
	if (rc != X86EMUL_CONTINUE)
		return rc;

2083
	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2084 2085 2086
	if (rc != X86EMUL_CONTINUE)
		return rc;

2087
	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2088 2089 2090
	if (rc != X86EMUL_CONTINUE)
		return rc;

2091
	ctxt->_eip = eip;
2092 2093 2094 2095

	return rc;
}

2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

2107
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2108 2109 2110
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
2111
		return __emulate_int_real(ctxt, irq);
2112 2113 2114 2115 2116 2117 2118 2119 2120 2121
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

2122
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2123
{
2124 2125 2126 2127
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
2128 2129 2130 2131 2132
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
W
Wanpeng Li 已提交
2133
			     X86_EFLAGS_FIXED;
2134 2135
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;
2136

2137
	/* TODO: Add stack limit check */
2138

2139
	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2140

2141 2142
	if (rc != X86EMUL_CONTINUE)
		return rc;
2143

2144 2145
	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);
2146

2147
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2148

2149 2150
	if (rc != X86EMUL_CONTINUE)
		return rc;
2151

2152
	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2153

2154 2155
	if (rc != X86EMUL_CONTINUE)
		return rc;
2156

2157
	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2158

2159 2160
	if (rc != X86EMUL_CONTINUE)
		return rc;
2161

2162
	ctxt->_eip = temp_eip;
2163

2164
	if (ctxt->op_bytes == 4)
2165
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2166
	else if (ctxt->op_bytes == 2) {
2167 2168
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
2169
	}
2170 2171

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
W
Wanpeng Li 已提交
2172
	ctxt->eflags |= X86_EFLAGS_FIXED;
2173
	ctxt->ops->set_nmi_mask(ctxt, false);
2174 2175

	return rc;
2176 2177
}

2178
static int em_iret(struct x86_emulate_ctxt *ctxt)
2179
{
2180 2181
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
2182
		return emulate_iret_real(ctxt);
2183 2184 2185 2186
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
2187
	default:
2188 2189
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
2190 2191 2192
	}
}

2193 2194 2195
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
2196 2197
	unsigned short sel;
	struct desc_struct new_desc;
2198 2199
	u8 cpl = ctxt->ops->cpl(ctxt);

2200
	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2201

2202 2203
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
2204
				       &new_desc);
2205 2206 2207
	if (rc != X86EMUL_CONTINUE)
		return rc;

2208
	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2209 2210 2211 2212
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

2213
	return rc;
2214 2215
}

2216
static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2217
{
2218 2219
	return assign_eip_near(ctxt, ctxt->src.val);
}
2220

2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231
static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
2232
	return rc;
2233 2234
}

2235
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2236
{
2237
	u64 old = ctxt->dst.orig_val64;
2238

2239 2240 2241
	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

2242 2243 2244 2245
	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2246
		ctxt->eflags &= ~X86_EFLAGS_ZF;
2247
	} else {
2248 2249
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);
2250

2251
		ctxt->eflags |= X86_EFLAGS_ZF;
2252
	}
2253
	return X86EMUL_CONTINUE;
2254 2255
}

2256 2257
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
2258 2259 2260 2261 2262 2263 2264 2265
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
2266 2267
}

2268
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2269 2270
{
	int rc;
2271
	unsigned long eip, cs;
2272
	int cpl = ctxt->ops->cpl(ctxt);
2273
	struct desc_struct new_desc;
2274

2275
	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2276
	if (rc != X86EMUL_CONTINUE)
2277
		return rc;
2278
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2279
	if (rc != X86EMUL_CONTINUE)
2280
		return rc;
2281 2282 2283
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
2284 2285
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
2286 2287 2288
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
2289
	rc = assign_eip_far(ctxt, eip, &new_desc);
2290 2291 2292 2293
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

2294 2295 2296
	return rc;
}

2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307
static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
        int rc;

        rc = em_ret_far(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rsp_increment(ctxt, ctxt->src.val);
        return X86EMUL_CONTINUE;
}

2308 2309 2310
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
2311 2312
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2313
	ctxt->src.orig_val = ctxt->src.val;
2314
	ctxt->src.val = ctxt->dst.orig_val;
2315
	fastop(ctxt, em_cmp);
2316

2317
	if (ctxt->eflags & X86_EFLAGS_ZF) {
2318 2319
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
2320 2321 2322
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
2323 2324 2325 2326
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
2327
		ctxt->dst.val = ctxt->dst.orig_val;
2328 2329 2330 2331
	}
	return X86EMUL_CONTINUE;
}

2332
static int em_lseg(struct x86_emulate_ctxt *ctxt)
2333
{
2334
	int seg = ctxt->src2.val;
2335 2336 2337
	unsigned short sel;
	int rc;

2338
	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2339

2340
	rc = load_segment_descriptor(ctxt, sel, seg);
2341 2342 2343
	if (rc != X86EMUL_CONTINUE)
		return rc;

2344
	ctxt->dst.val = ctxt->src.val;
2345 2346 2347
	return rc;
}

2348 2349
static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
2350
#ifdef CONFIG_X86_64
2351 2352 2353 2354
	u32 eax, ebx, ecx, edx;

	eax = 0x80000001;
	ecx = 0;
2355
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2356
	return edx & bit(X86_FEATURE_LM);
2357 2358 2359
#else
	return false;
#endif
2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373
}

static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}

2374 2375
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
2376 2377 2378 2379 2380
{
	struct desc_struct desc;
	int offset;
	u16 selector;

2381
	selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
2382 2383 2384 2385 2386 2387

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

2388 2389 2390
	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
2391 2392 2393 2394
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

2395
#ifdef CONFIG_X86_64
2396 2397
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
2398 2399 2400 2401 2402 2403 2404 2405
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

2406 2407 2408 2409 2410
	selector =                GET_SMSTATE(u16, smstate, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
	set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
	set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
	base3 =                   GET_SMSTATE(u32, smstate, offset + 12);
2411 2412 2413 2414

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}
2415
#endif
2416 2417

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
2418
				    u64 cr0, u64 cr3, u64 cr4)
2419 2420
{
	int bad;
2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432
	u64 pcid;

	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;
2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
2451 2452 2453 2454 2455 2456
		if (pcid) {
			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}

2457 2458 2459 2460 2461
	}

	return X86EMUL_CONTINUE;
}

2462 2463
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
2464 2465 2466 2467
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
2468
	u32 val, cr0, cr3, cr4;
2469 2470
	int i;

2471 2472 2473 2474
	cr0 =                      GET_SMSTATE(u32, smstate, 0x7ffc);
	cr3 =                      GET_SMSTATE(u32, smstate, 0x7ff8);
	ctxt->eflags =             GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip =               GET_SMSTATE(u32, smstate, 0x7ff0);
2475 2476

	for (i = 0; i < 8; i++)
2477
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
2478

2479
	val = GET_SMSTATE(u32, smstate, 0x7fcc);
2480
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2481
	val = GET_SMSTATE(u32, smstate, 0x7fc8);
2482 2483
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

2484 2485 2486 2487
	selector =                 GET_SMSTATE(u32, smstate, 0x7fc4);
	set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f64));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f60));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f5c));
2488 2489
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

2490 2491 2492 2493
	selector =                 GET_SMSTATE(u32, smstate, 0x7fc0);
	set_desc_base(&desc,       GET_SMSTATE(u32, smstate, 0x7f80));
	set_desc_limit(&desc,      GET_SMSTATE(u32, smstate, 0x7f7c));
	rsm_set_desc_flags(&desc,  GET_SMSTATE(u32, smstate, 0x7f78));
2494 2495
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

2496 2497
	dt.address =               GET_SMSTATE(u32, smstate, 0x7f74);
	dt.size =                  GET_SMSTATE(u32, smstate, 0x7f70);
2498 2499
	ctxt->ops->set_gdt(ctxt, &dt);

2500 2501
	dt.address =               GET_SMSTATE(u32, smstate, 0x7f58);
	dt.size =                  GET_SMSTATE(u32, smstate, 0x7f54);
2502 2503 2504
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
2505
		int r = rsm_load_seg_32(ctxt, smstate, i);
2506 2507 2508 2509
		if (r != X86EMUL_CONTINUE)
			return r;
	}

2510
	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
2511

2512
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
2513

2514
	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2515 2516
}

2517
#ifdef CONFIG_X86_64
2518 2519
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
2520 2521 2522
{
	struct desc_struct desc;
	struct desc_ptr dt;
2523
	u64 val, cr0, cr3, cr4;
2524 2525
	u32 base3;
	u16 selector;
2526
	int i, r;
2527 2528

	for (i = 0; i < 16; i++)
2529
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2530

2531 2532
	ctxt->_eip   = GET_SMSTATE(u64, smstate, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2533

2534
	val = GET_SMSTATE(u32, smstate, 0x7f68);
2535
	ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
2536
	val = GET_SMSTATE(u32, smstate, 0x7f60);
2537 2538
	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);

2539 2540 2541 2542 2543
	cr0 =                       GET_SMSTATE(u64, smstate, 0x7f58);
	cr3 =                       GET_SMSTATE(u64, smstate, 0x7f50);
	cr4 =                       GET_SMSTATE(u64, smstate, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
	val =                       GET_SMSTATE(u64, smstate, 0x7ed0);
2544 2545
	ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);

2546 2547 2548 2549 2550
	selector =                  GET_SMSTATE(u32, smstate, 0x7e90);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e92) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e94));
	set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e98));
	base3 =                     GET_SMSTATE(u32, smstate, 0x7e9c);
2551 2552
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

2553 2554
	dt.size =                   GET_SMSTATE(u32, smstate, 0x7e84);
	dt.address =                GET_SMSTATE(u64, smstate, 0x7e88);
2555 2556
	ctxt->ops->set_idt(ctxt, &dt);

2557 2558 2559 2560 2561
	selector =                  GET_SMSTATE(u32, smstate, 0x7e70);
	rsm_set_desc_flags(&desc,   GET_SMSTATE(u32, smstate, 0x7e72) << 8);
	set_desc_limit(&desc,       GET_SMSTATE(u32, smstate, 0x7e74));
	set_desc_base(&desc,        GET_SMSTATE(u32, smstate, 0x7e78));
	base3 =                     GET_SMSTATE(u32, smstate, 0x7e7c);
2562 2563
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

2564 2565
	dt.size =                   GET_SMSTATE(u32, smstate, 0x7e64);
	dt.address =                GET_SMSTATE(u64, smstate, 0x7e68);
2566 2567
	ctxt->ops->set_gdt(ctxt, &dt);

2568
	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2569 2570 2571
	if (r != X86EMUL_CONTINUE)
		return r;

2572
	for (i = 0; i < 6; i++) {
2573
		r = rsm_load_seg_64(ctxt, smstate, i);
2574 2575 2576 2577
		if (r != X86EMUL_CONTINUE)
			return r;
	}

2578
	return X86EMUL_CONTINUE;
2579
}
2580
#endif
2581

P
Paolo Bonzini 已提交
2582 2583
static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
2584
	unsigned long cr0, cr4, efer;
2585
	char buf[512];
2586 2587 2588
	u64 smbase;
	int ret;

2589
	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
P
Paolo Bonzini 已提交
2590 2591
		return emulate_ud(ctxt);

2592 2593 2594 2595 2596 2597
	smbase = ctxt->ops->get_smbase(ctxt);

	ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
	if (ret != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

2598 2599 2600 2601 2602 2603
	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));

2604 2605
	/*
	 * Get back to real mode, to prepare a safe state in which to load
2606 2607
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
2608
	 */
2609 2610 2611 2612
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG.  */
2613 2614
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PCIDE)
2615 2616 2617 2618 2619 2620 2621 2622 2623 2624
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);

		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA.  */
2625 2626 2627
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2628

2629 2630 2631 2632 2633 2634 2635 2636 2637 2638
	if (emulator_has_longmode(ctxt)) {
		/* Clear CR4.PAE before clearing EFER.LME. */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PAE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

		/* And finally go back to 32-bit mode.  */
		efer = 0;
		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
	}
2639

2640 2641 2642 2643 2644
	/*
	 * Give pre_leave_smm() a chance to make ISA-specific changes to the
	 * vCPU state (e.g. enter guest mode) before loading state from the SMM
	 * state-save area.
	 */
2645
	if (ctxt->ops->pre_leave_smm(ctxt, buf))
2646 2647
		return X86EMUL_UNHANDLEABLE;

2648
#ifdef CONFIG_X86_64
2649
	if (emulator_has_longmode(ctxt))
2650
		ret = rsm_load_state_64(ctxt, buf);
2651
	else
2652
#endif
2653
		ret = rsm_load_state_32(ctxt, buf);
2654 2655 2656 2657 2658 2659

	if (ret != X86EMUL_CONTINUE) {
		/* FIXME: should triple fault */
		return X86EMUL_UNHANDLEABLE;
	}

2660 2661
	ctxt->ops->post_leave_smm(ctxt);

2662
	return X86EMUL_CONTINUE;
P
Paolo Bonzini 已提交
2663 2664
}

2665
static void
2666
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2667
			struct desc_struct *cs, struct desc_struct *ss)
2668 2669
{
	cs->l = 0;		/* will be adjusted later */
2670
	set_desc_base(cs, 0);	/* flat segment */
2671
	cs->g = 1;		/* 4kb granularity */
2672
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
2673 2674 2675
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
2676 2677
	cs->p = 1;
	cs->d = 1;
2678
	cs->avl = 0;
2679

2680 2681
	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
2682 2683 2684
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
2685
	ss->d = 1;		/* 32bit stack segment */
2686
	ss->dpl = 0;
2687
	ss->p = 1;
2688 2689
	ss->l = 0;
	ss->avl = 0;
2690 2691
}

2692 2693 2694 2695 2696
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
2697
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2698
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2699 2700 2701 2702
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

2703 2704
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
2705
	const struct x86_emulate_ops *ops = ctxt->ops;
2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
2717
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741
	/*
	 * Intel ("GenuineIntel")
	 * remark: Intel CPUs only support "syscall" in 64bit
	 * longmode. Also an 64bit guest with a
	 * 32bit compat-app running will #UD !! While this
	 * behaviour can be fixed (by emulating) into AMD
	 * response - CPUs of AMD can't behave like Intel.
	 */
	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
		return false;

	/* AMD ("AuthenticAMD") */
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	/* AMD ("AMDisbetter!") */
	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
		return true;
2742

2743 2744 2745 2746 2747 2748 2749 2750 2751 2752
	/* Hygon ("HygonGenuine") */
	if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
		return true;

	/*
	 * default: (not Intel, not AMD, not Hygon), apply Intel's
	 * stricter rules...
	 */
2753 2754 2755
	return false;
}

2756
static int em_syscall(struct x86_emulate_ctxt *ctxt)
2757
{
2758
	const struct x86_emulate_ops *ops = ctxt->ops;
2759
	struct desc_struct cs, ss;
2760
	u64 msr_data;
2761
	u16 cs_sel, ss_sel;
2762
	u64 efer = 0;
2763 2764

	/* syscall is not available in real mode */
2765
	if (ctxt->mode == X86EMUL_MODE_REAL ||
2766 2767
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);
2768

2769 2770 2771
	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

2772
	ops->get_msr(ctxt, MSR_EFER, &efer);
2773 2774 2775
	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

2776
	setup_syscalls_segments(ctxt, &cs, &ss);
2777
	ops->get_msr(ctxt, MSR_STAR, &msr_data);
2778
	msr_data >>= 32;
2779 2780
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);
2781

2782
	if (efer & EFER_LMA) {
2783
		cs.d = 0;
2784 2785
		cs.l = 1;
	}
2786 2787
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2788

2789
	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2790
	if (efer & EFER_LMA) {
2791
#ifdef CONFIG_X86_64
2792
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2793

2794
		ops->get_msr(ctxt,
2795 2796
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
2797
		ctxt->_eip = msr_data;
2798

2799
		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2800
		ctxt->eflags &= ~msr_data;
W
Wanpeng Li 已提交
2801
		ctxt->eflags |= X86_EFLAGS_FIXED;
2802 2803 2804
#endif
	} else {
		/* legacy mode */
2805
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
2806
		ctxt->_eip = (u32)msr_data;
2807

2808
		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2809 2810
	}

2811
	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2812
	return X86EMUL_CONTINUE;
2813 2814
}

2815
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2816
{
2817
	const struct x86_emulate_ops *ops = ctxt->ops;
2818
	struct desc_struct cs, ss;
2819
	u64 msr_data;
2820
	u16 cs_sel, ss_sel;
2821
	u64 efer = 0;
2822

2823
	ops->get_msr(ctxt, MSR_EFER, &efer);
2824
	/* inject #GP if in real mode */
2825 2826
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);
2827

2828 2829 2830 2831
	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
2832
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2833 2834 2835
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

2836
	/* sysenter/sysexit have not been tested in 64bit mode. */
2837
	if (ctxt->mode == X86EMUL_MODE_PROT64)
2838
		return X86EMUL_UNHANDLEABLE;
2839

2840
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2841 2842
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);
2843

2844
	setup_syscalls_segments(ctxt, &cs, &ss);
2845
	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2846
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2847
	ss_sel = cs_sel + 8;
2848
	if (efer & EFER_LMA) {
2849
		cs.d = 0;
2850 2851 2852
		cs.l = 1;
	}

2853 2854
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2855

2856
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2857
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2858

2859
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2860 2861
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;
2862

2863
	return X86EMUL_CONTINUE;
2864 2865
}

2866
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2867
{
2868
	const struct x86_emulate_ops *ops = ctxt->ops;
2869
	struct desc_struct cs, ss;
2870
	u64 msr_data, rcx, rdx;
2871
	int usermode;
X
Xiao Guangrong 已提交
2872
	u16 cs_sel = 0, ss_sel = 0;
2873

2874 2875
	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
2876 2877
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);
2878

2879
	setup_syscalls_segments(ctxt, &cs, &ss);
2880

2881
	if ((ctxt->rex_prefix & 0x8) != 0x0)
2882 2883 2884 2885
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

2886 2887 2888
	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

2889 2890
	cs.dpl = 3;
	ss.dpl = 3;
2891
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2892 2893
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
2894
		cs_sel = (u16)(msr_data + 16);
2895 2896
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
2897
		ss_sel = (u16)(msr_data + 24);
2898 2899
		rcx = (u32)rcx;
		rdx = (u32)rdx;
2900 2901
		break;
	case X86EMUL_MODE_PROT64:
2902
		cs_sel = (u16)(msr_data + 32);
2903 2904
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
2905 2906
		ss_sel = cs_sel + 8;
		cs.d = 0;
2907
		cs.l = 1;
2908 2909
		if (emul_is_noncanonical_address(rcx, ctxt) ||
		    emul_is_noncanonical_address(rdx, ctxt))
2910
			return emulate_gp(ctxt, 0);
2911 2912
		break;
	}
2913 2914
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;
2915

2916 2917
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2918

2919 2920
	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2921

2922
	return X86EMUL_CONTINUE;
2923 2924
}

2925
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2926 2927 2928 2929 2930 2931
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
2932
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2933
	return ctxt->ops->cpl(ctxt) > iopl;
2934 2935
}

2936 2937 2938
#define VMWARE_PORT_VMPORT	(0x5658)
#define VMWARE_PORT_VMRPC	(0x5659)

2939 2940 2941
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
2942
	const struct x86_emulate_ops *ops = ctxt->ops;
2943
	struct desc_struct tr_seg;
2944
	u32 base3;
2945
	int r;
2946
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2947
	unsigned mask = (1 << len) - 1;
2948
	unsigned long base;
2949

2950 2951 2952 2953 2954 2955 2956 2957
	/*
	 * VMware allows access to these ports even if denied
	 * by TSS I/O permission bitmap. Mimic behavior.
	 */
	if (enable_vmware_backdoor &&
	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
		return true;

2958
	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2959
	if (!tr_seg.p)
2960
		return false;
2961
	if (desc_limit_scaled(&tr_seg) < 103)
2962
		return false;
2963 2964 2965 2966
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
2967
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2968 2969
	if (r != X86EMUL_CONTINUE)
		return false;
2970
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2971
		return false;
2972
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2973 2974 2975 2976 2977 2978 2979 2980 2981 2982
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
2983 2984 2985
	if (ctxt->perm_ok)
		return true;

2986 2987
	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
2988
			return false;
2989 2990 2991

	ctxt->perm_ok = true;

2992 2993 2994
	return true;
}

2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018
static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		/* fall through */
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}

3019 3020 3021
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
3022
	tss->ip = ctxt->_eip;
3023
	tss->flag = ctxt->eflags;
3024 3025 3026 3027 3028 3029 3030 3031
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
3032

3033 3034 3035 3036 3037
	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
3038 3039 3040 3041 3042 3043
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
3044
	u8 cpl;
3045

3046
	ctxt->_eip = tss->ip;
3047
	ctxt->eflags = tss->flag | 2;
3048 3049 3050 3051 3052 3053 3054 3055
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
3056 3057 3058 3059 3060

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
3061 3062 3063 3064 3065
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3066

3067 3068
	cpl = tss->cs & 3;

3069
	/*
G
Guo Chao 已提交
3070
	 * Now load segment descriptors. If fault happens at this stage
3071 3072
	 * it is handled in a context of new task
	 */
3073
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3074
					X86_TRANSFER_TASK_SWITCH, NULL);
3075 3076
	if (ret != X86EMUL_CONTINUE)
		return ret;
3077
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3078
					X86_TRANSFER_TASK_SWITCH, NULL);
3079 3080
	if (ret != X86EMUL_CONTINUE)
		return ret;
3081
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3082
					X86_TRANSFER_TASK_SWITCH, NULL);
3083 3084
	if (ret != X86EMUL_CONTINUE)
		return ret;
3085
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3086
					X86_TRANSFER_TASK_SWITCH, NULL);
3087 3088
	if (ret != X86EMUL_CONTINUE)
		return ret;
3089
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3090
					X86_TRANSFER_TASK_SWITCH, NULL);
3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
3103
	u32 new_tss_base = get_desc_base(new_desc);
3104

3105
	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3106
	if (ret != X86EMUL_CONTINUE)
3107 3108
		return ret;

3109
	save_state_to_tss16(ctxt, &tss_seg);
3110

3111
	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3112
	if (ret != X86EMUL_CONTINUE)
3113 3114
		return ret;

3115
	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3116
	if (ret != X86EMUL_CONTINUE)
3117 3118 3119 3120 3121
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

3122 3123
		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
3124
					  sizeof(tss_seg.prev_task_link));
3125
		if (ret != X86EMUL_CONTINUE)
3126 3127 3128
			return ret;
	}

3129
	return load_state_from_tss16(ctxt, &tss_seg);
3130 3131 3132 3133 3134
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
3135
	/* CR3 and ldt selector are not saved intentionally */
3136
	tss->eip = ctxt->_eip;
3137
	tss->eflags = ctxt->eflags;
3138 3139 3140 3141 3142 3143 3144 3145
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3146

3147 3148 3149 3150 3151 3152
	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3153 3154 3155 3156 3157 3158
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
3159
	u8 cpl;
3160

3161
	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3162
		return emulate_gp(ctxt, 0);
3163
	ctxt->_eip = tss->eip;
3164
	ctxt->eflags = tss->eflags | 2;
3165 3166

	/* General purpose registers */
3167 3168 3169 3170 3171 3172 3173 3174
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3175 3176 3177

	/*
	 * SDM says that segment selectors are loaded before segment
3178 3179
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
3180
	 */
3181 3182 3183 3184 3185 3186 3187
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3188

3189 3190 3191 3192 3193
	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
3194
	if (ctxt->eflags & X86_EFLAGS_VM) {
3195
		ctxt->mode = X86EMUL_MODE_VM86;
3196 3197
		cpl = 3;
	} else {
3198
		ctxt->mode = X86EMUL_MODE_PROT32;
3199 3200
		cpl = tss->cs & 3;
	}
3201

3202 3203 3204 3205
	/*
	 * Now load segment descriptors. If fault happenes at this stage
	 * it is handled in a context of new task
	 */
3206
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3207
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3208 3209
	if (ret != X86EMUL_CONTINUE)
		return ret;
3210
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3211
					X86_TRANSFER_TASK_SWITCH, NULL);
3212 3213
	if (ret != X86EMUL_CONTINUE)
		return ret;
3214
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3215
					X86_TRANSFER_TASK_SWITCH, NULL);
3216 3217
	if (ret != X86EMUL_CONTINUE)
		return ret;
3218
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3219
					X86_TRANSFER_TASK_SWITCH, NULL);
3220 3221
	if (ret != X86EMUL_CONTINUE)
		return ret;
3222
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3223
					X86_TRANSFER_TASK_SWITCH, NULL);
3224 3225
	if (ret != X86EMUL_CONTINUE)
		return ret;
3226
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3227
					X86_TRANSFER_TASK_SWITCH, NULL);
3228 3229
	if (ret != X86EMUL_CONTINUE)
		return ret;
3230
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3231
					X86_TRANSFER_TASK_SWITCH, NULL);
3232

3233
	return ret;
3234 3235 3236 3237 3238 3239 3240 3241
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
3242
	u32 new_tss_base = get_desc_base(new_desc);
3243 3244
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3245

3246
	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3247
	if (ret != X86EMUL_CONTINUE)
3248 3249
		return ret;

3250
	save_state_to_tss32(ctxt, &tss_seg);
3251

3252
	/* Only GP registers and segment selectors are saved */
3253 3254
	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
				  ldt_sel_offset - eip_offset);
3255
	if (ret != X86EMUL_CONTINUE)
3256 3257
		return ret;

3258
	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3259
	if (ret != X86EMUL_CONTINUE)
3260 3261 3262 3263 3264
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

3265 3266
		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
3267
					  sizeof(tss_seg.prev_task_link));
3268
		if (ret != X86EMUL_CONTINUE)
3269 3270 3271
			return ret;
	}

3272
	return load_state_from_tss32(ctxt, &tss_seg);
3273 3274 3275
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3276
				   u16 tss_selector, int idt_index, int reason,
3277
				   bool has_error_code, u32 error_code)
3278
{
3279
	const struct x86_emulate_ops *ops = ctxt->ops;
3280 3281
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
3282
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3283
	ulong old_tss_base =
3284
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3285
	u32 desc_limit;
3286
	ulong desc_addr, dr7;
3287 3288 3289

	/* FIXME: old_tss_base == ~0 ? */

3290
	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3291 3292
	if (ret != X86EMUL_CONTINUE)
		return ret;
3293
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3294 3295 3296 3297 3298
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

3299 3300 3301 3302 3303
	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
3304 3305
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
3322 3323
	}

3324 3325 3326 3327
	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
3328
		return emulate_ts(ctxt, tss_selector & 0xfffc);
3329 3330 3331 3332
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3333
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3334 3335 3336 3337 3338 3339
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
G
Guo Chao 已提交
3340
	   note that old_tss_sel is not used after this point */
3341 3342 3343 3344
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
3345
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3346 3347
				     old_tss_base, &next_tss_desc);
	else
3348
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3349
				     old_tss_base, &next_tss_desc);
3350 3351
	if (ret != X86EMUL_CONTINUE)
		return ret;
3352 3353 3354 3355 3356 3357

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
3358
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3359 3360
	}

3361
	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
3362
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3363

3364
	if (has_error_code) {
3365 3366 3367
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
3368
		ret = em_push(ctxt);
3369 3370
	}

3371 3372 3373
	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

3374 3375 3376 3377
	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3378
			 u16 tss_selector, int idt_index, int reason,
3379
			 bool has_error_code, u32 error_code)
3380 3381 3382
{
	int rc;

3383
	invalidate_registers(ctxt);
3384 3385
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;
3386

3387
	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3388
				     has_error_code, error_code);
3389

3390
	if (rc == X86EMUL_CONTINUE) {
3391
		ctxt->eip = ctxt->_eip;
3392 3393
		writeback_registers(ctxt);
	}
3394

3395
	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3396 3397
}

3398 3399
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
		struct operand *op)
3400
{
3401
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3402

3403 3404
	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
3405 3406
}

3407 3408 3409 3410 3411 3412
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
3413
	al = ctxt->dst.val;
3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

3431
	ctxt->dst.val = al;
3432
	/* Set PF, ZF, SF */
3433 3434 3435
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
3436
	fastop(ctxt, em_or);
3437 3438 3439 3440 3441 3442 3443 3444
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

P
Paolo Bonzini 已提交
3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

3467 3468 3469 3470 3471 3472 3473 3474 3475
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

3476 3477 3478 3479 3480
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
3481 3482 3483 3484

	return X86EMUL_CONTINUE;
}

3485 3486
static int em_call(struct x86_emulate_ctxt *ctxt)
{
3487
	int rc;
3488 3489 3490
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
3491 3492 3493
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
3494 3495 3496
	return em_push(ctxt);
}

3497 3498 3499 3500 3501
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
3502 3503 3504
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
3505
	enum x86emul_mode prev_mode = ctxt->mode;
3506

3507
	old_eip = ctxt->_eip;
3508
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3509

3510
	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3511 3512
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
3513
	if (rc != X86EMUL_CONTINUE)
3514
		return rc;
3515

3516
	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3517 3518
	if (rc != X86EMUL_CONTINUE)
		goto fail;
3519

3520
	ctxt->src.val = old_cs;
3521
	rc = em_push(ctxt);
3522
	if (rc != X86EMUL_CONTINUE)
3523
		goto fail;
3524

3525
	ctxt->src.val = old_eip;
3526 3527 3528
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but the very least we should
	   restore cs */
3529 3530
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
3531
		goto fail;
3532
	}
3533 3534 3535
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3536
	ctxt->mode = prev_mode;
3537 3538
	return rc;

3539 3540
}

3541 3542 3543
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
3544
	unsigned long eip;
3545

3546 3547 3548 3549
	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
3550 3551
	if (rc != X86EMUL_CONTINUE)
		return rc;
3552
	rsp_increment(ctxt, ctxt->src.val);
3553 3554 3555
	return X86EMUL_CONTINUE;
}

3556 3557 3558
static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
3559 3560
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);
3561 3562

	/* Write back the memory destination with implicit LOCK prefix. */
3563 3564
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
3565 3566 3567
	return X86EMUL_CONTINUE;
}

3568 3569
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
3570
	ctxt->dst.val = ctxt->src2.val;
3571
	return fastop(ctxt, em_imul);
3572 3573
}

3574 3575
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
3576 3577
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
3578
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3579
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3580 3581 3582 3583

	return X86EMUL_CONTINUE;
}

P
Paolo Bonzini 已提交
3584 3585 3586 3587 3588 3589 3590 3591 3592 3593
static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc_aux = 0;

	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
		return emulate_gp(ctxt, 0);
	ctxt->dst.val = tsc_aux;
	return X86EMUL_CONTINUE;
}

3594 3595 3596 3597
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

3598
	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3599 3600
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3601 3602 3603
	return X86EMUL_CONTINUE;
}

3604 3605 3606 3607
static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

3608
	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3609
		return emulate_gp(ctxt, 0);
3610 3611
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3612 3613 3614
	return X86EMUL_CONTINUE;
}

3615 3616
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
3617
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3618 3619 3620
	return X86EMUL_CONTINUE;
}

B
Borislav Petkov 已提交
3621 3622 3623 3624 3625 3626 3627 3628 3629 3630
#define FFL(x) bit(X86_FEATURE_##x)

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u32 ebx, ecx, edx, eax = 1;
	u16 tmp;

	/*
	 * Check MOVBE is set in the guest-visible CPUID leaf.
	 */
3631
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
B
Borislav Petkov 已提交
3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655
	if (!(ecx & FFL(MOVBE)))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition: "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
3656
		BUG();
B
Borislav Petkov 已提交
3657 3658 3659 3660
	}
	return X86EMUL_CONTINUE;
}

3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

3689 3690 3691 3692
static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

3693 3694 3695
	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3696 3697 3698 3699 3700 3701 3702 3703 3704
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

3705
	if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3706 3707
		return emulate_gp(ctxt, 0);

3708 3709
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3710 3711 3712
	return X86EMUL_CONTINUE;
}

P
Paolo Bonzini 已提交
3713
static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3714
{
P
Paolo Bonzini 已提交
3715 3716 3717 3718
	if (segment > VCPU_SREG_GS &&
	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);
3719

P
Paolo Bonzini 已提交
3720
	ctxt->dst.val = get_segment_selector(ctxt, segment);
3721 3722
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
3723 3724 3725
	return X86EMUL_CONTINUE;
}

P
Paolo Bonzini 已提交
3726 3727 3728 3729 3730 3731 3732 3733
static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	return em_store_sreg(ctxt, ctxt->modrm_reg);
}

3734 3735
static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
3736
	u16 sel = ctxt->src.val;
3737

3738
	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3739 3740
		return emulate_ud(ctxt);

3741
	if (ctxt->modrm_reg == VCPU_SREG_SS)
3742 3743 3744
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
3745 3746
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3747 3748
}

P
Paolo Bonzini 已提交
3749 3750 3751 3752 3753
static int em_sldt(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
}

A
Avi Kivity 已提交
3754 3755 3756 3757 3758 3759 3760 3761 3762
static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

P
Paolo Bonzini 已提交
3763 3764 3765 3766 3767
static int em_str(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_TR);
}

A
Avi Kivity 已提交
3768 3769 3770 3771 3772 3773 3774 3775 3776
static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

3777 3778
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
3779 3780 3781
	int rc;
	ulong linear;

3782
	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3783
	if (rc == X86EMUL_CONTINUE)
3784
		ctxt->ops->invlpg(ctxt, linear);
3785
	/* Disable writeback. */
3786
	ctxt->dst.type = OP_NONE;
3787 3788 3789
	return X86EMUL_CONTINUE;
}

3790 3791 3792 3793 3794 3795 3796 3797 3798 3799
static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

3800
static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3801
{
3802
	int rc = ctxt->ops->fix_hypercall(ctxt);
3803 3804 3805 3806 3807

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
3808
	ctxt->_eip = ctxt->eip;
3809
	/* Disable writeback. */
3810
	ctxt->dst.type = OP_NONE;
3811 3812 3813
	return X86EMUL_CONTINUE;
}

3814 3815 3816 3817 3818 3819
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

P
Paolo Bonzini 已提交
3820 3821 3822 3823
	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

3824 3825 3826 3827 3828 3829 3830 3831 3832
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
3833 3834
	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
				   &desc_ptr, 2 + ctxt->op_bytes);
3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

3847
static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3848 3849 3850 3851
{
	struct desc_ptr desc_ptr;
	int rc;

3852 3853
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
3854
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3855
			     &desc_ptr.size, &desc_ptr.address,
3856
			     ctxt->op_bytes);
3857 3858
	if (rc != X86EMUL_CONTINUE)
		return rc;
3859
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3860
	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
3861
		return emulate_gp(ctxt, 0);
3862 3863 3864 3865
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
3866
	/* Disable writeback. */
3867
	ctxt->dst.type = OP_NONE;
3868 3869 3870
	return X86EMUL_CONTINUE;
}

3871 3872 3873 3874 3875
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

3876 3877
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
3878
	return em_lgdt_lidt(ctxt, false);
3879 3880 3881 3882
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
P
Paolo Bonzini 已提交
3883 3884 3885 3886
	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

3887 3888
	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
3889
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3890 3891 3892 3893 3894 3895
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3896 3897
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
3898 3899 3900
	return X86EMUL_CONTINUE;
}

3901 3902
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
3903 3904
	int rc = X86EMUL_CONTINUE;

3905
	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3906
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3907
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3908
		rc = jmp_rel(ctxt, ctxt->src.val);
3909

3910
	return rc;
3911 3912 3913 3914
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
3915 3916
	int rc = X86EMUL_CONTINUE;

3917
	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3918
		rc = jmp_rel(ctxt, ctxt->src.val);
3919

3920
	return rc;
3921 3922
}

3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940
static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959
static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

A
Avi Kivity 已提交
3960 3961 3962
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;
K
Kyle Huey 已提交
3963 3964 3965 3966 3967 3968 3969
	u64 msr = 0;

	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
	    ctxt->ops->cpl(ctxt)) {
		return emulate_gp(ctxt, 0);
	}
A
Avi Kivity 已提交
3970

3971 3972
	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
3973
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
3974 3975 3976 3977
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
A
Avi Kivity 已提交
3978 3979 3980
	return X86EMUL_CONTINUE;
}

P
Paolo Bonzini 已提交
3981 3982 3983 3984
static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

3985 3986
	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
P
Paolo Bonzini 已提交
3987 3988 3989 3990 3991 3992 3993
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

A
Avi Kivity 已提交
3994 3995
static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
3996 3997
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
A
Avi Kivity 已提交
3998 3999 4000
	return X86EMUL_CONTINUE;
}

A
Avi Kivity 已提交
4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

4016 4017 4018 4019 4020 4021
static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

4022 4023 4024 4025 4026 4027
static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}

4028 4029 4030 4031
static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
	u32 eax = 1, ebx, ecx = 0, edx;

4032
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048
	if (!(edx & FFL(FXSR)))
		return emulate_ud(ctxt);

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	/*
	 * Don't emulate a case that should never be hit, instead of working
	 * around a lack of fxsave64/fxrstor64 on old compilers.
	 */
	if (ctxt->mode >= X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	return X86EMUL_CONTINUE;
}

4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067
/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{
	bool cr4_osfxsr;
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return __fxstate_size(16);

	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	return __fxstate_size(cr4_osfxsr ? 8 : 0);
}

4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099
/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *     - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
 *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
 *       save and restore
 *  3) 64-bit mode with REX.W prefix
 *     - like (2), but XMM 8-15 are being saved and restored
 *  4) 64-bit mode without REX.W prefix
 *     - like (3), but FIP and FDP are 64 bit
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
 * and FPU DS) should match.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

	if (rc != X86EMUL_CONTINUE)
		return rc;

4100 4101
	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
		                   fxstate_size(ctxt));
4102 4103
}

4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123
/*
 * FXRSTOR might restore XMM registers not provided by the guest. Fill
 * in the host registers (via FXSAVE) instead, so they won't be modified.
 * (preemption has to stay disabled until FXRSTOR).
 *
 * Use noinline to keep the stack for other functions called by callers small.
 */
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{
	struct fxregs_state fx_tmp;
	int rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
	       __fxstate_size(16) - used_size);

	return rc;
}

4124 4125 4126 4127
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;
4128
	size_t size;
4129 4130 4131 4132 4133

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

4134 4135 4136 4137 4138
	size = fxstate_size(ctxt);
	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
	if (rc != X86EMUL_CONTINUE)
		return rc;

4139
	if (size < __fxstate_size(16)) {
4140
		rc = fxregs_fixup(&fx_state, size);
4141 4142 4143
		if (rc != X86EMUL_CONTINUE)
			goto out;
	}
4144

4145 4146 4147 4148
	if (fx_state.mxcsr >> 16) {
		rc = emulate_gp(ctxt, 0);
		goto out;
	}
4149 4150 4151 4152

	if (rc == X86EMUL_CONTINUE)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

4153
out:
4154 4155 4156
	return rc;
}

4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170
static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ecx, edx;

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	edx = reg_read(ctxt, VCPU_REGS_RDX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);

	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184
static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
4185
	if (!valid_cr(ctxt->modrm_reg))
4186 4187 4188 4189 4190 4191 4192
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
4193 4194
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
4195
	u64 efer = 0;
4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
4213
		u64 cr4;
4214 4215 4216 4217
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

4218 4219
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4220 4221 4222 4223 4224 4225 4226 4227 4228 4229

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

4230
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4231 4232
		if (efer & EFER_LMA) {
			u64 maxphyaddr;
4233
			u32 eax, ebx, ecx, edx;
4234

4235 4236 4237 4238
			eax = 0x80000008;
			ecx = 0;
			if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
						 &edx, false))
4239 4240 4241
				maxphyaddr = eax & 0xff;
			else
				maxphyaddr = 36;
4242 4243
			rsvd = rsvd_bits(maxphyaddr, 63);
			if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
4244
				rsvd &= ~X86_CR3_PCID_NOFLUSH;
4245
		}
4246 4247 4248 4249 4250 4251 4252

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
4253
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}

4265 4266 4267 4268
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

4269
	ctxt->ops->get_dr(ctxt, 7, &dr7);
4270 4271 4272 4273 4274 4275 4276

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
4277
	int dr = ctxt->modrm_reg;
4278 4279 4280 4281 4282
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

4283
	cr4 = ctxt->ops->get_cr(ctxt, 4);
4284 4285 4286
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

4287 4288 4289 4290
	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
4291
		dr6 &= ~DR_TRAP_BITS;
4292 4293
		dr6 |= DR6_BD | DR6_RTM;
		ctxt->ops->set_dr(ctxt, 6, dr6);
4294
		return emulate_db(ctxt);
4295
	}
4296 4297 4298 4299 4300 4301

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
4302 4303
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;
4304 4305 4306 4307 4308 4309 4310

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

4311 4312
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
4313
	u64 efer = 0;
4314

4315
	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4316 4317 4318 4319 4320 4321 4322 4323 4324

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
4325
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4326 4327

	/* Valid physical address? */
4328
	if (rax & 0xffff000000000000ULL)
4329 4330 4331 4332 4333
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

4334 4335
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
4336
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4337

4338
	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4339 4340 4341 4342 4343
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

4344 4345
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
4346
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4347
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4348

4349 4350 4351 4352 4353 4354 4355
	/*
	 * VMware allows access to these Pseduo-PMCs even when read via RDPMC
	 * in Ring3 when CR4.PCE=0.
	 */
	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
		return X86EMUL_CONTINUE;

4356
	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4357
	    ctxt->ops->check_pmc(ctxt, rcx))
4358 4359 4360 4361 4362
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

4363 4364
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
4365 4366
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4367 4368 4369 4370 4371 4372 4373
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
4374 4375
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4376 4377 4378 4379 4380
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

4381
#define D(_y) { .flags = (_y) }
4382 4383 4384
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
4385
#define N    D(NotImpl)
4386
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4387 4388
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4389
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4390
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4391
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4392
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4393
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4394
#define II(_f, _e, _i) \
4395
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4396
#define IIP(_f, _e, _i, _p) \
4397 4398
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
4399
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4400

4401
#define D2bv(_f)      D((_f) | ByteOp), D(_f)
4402
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4403
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
4404
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
4405 4406
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4407

4408 4409 4410
#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4411

4412 4413
static const struct opcode group7_rm0[] = {
	N,
4414
	I(SrcNone | Priv | EmulateOnUD,	em_hypercall),
4415 4416 4417
	N, N, N, N, N, N,
};

4418
static const struct opcode group7_rm1[] = {
4419 4420
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
4421 4422 4423
	N, N, N, N, N, N,
};

4424 4425 4426 4427 4428 4429
static const struct opcode group7_rm2[] = {
	N,
	II(ImplicitOps | Priv,			em_xsetbv,	xsetbv),
	N, N, N, N, N, N,
};

4430
static const struct opcode group7_rm3[] = {
4431
	DIP(SrcNone | Prot | Priv,		vmrun,		check_svme_pa),
4432
	II(SrcNone  | Prot | EmulateOnUD,	em_hypercall,	vmmcall),
4433 4434 4435 4436 4437 4438
	DIP(SrcNone | Prot | Priv,		vmload,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		vmsave,		check_svme_pa),
	DIP(SrcNone | Prot | Priv,		stgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		clgi,		check_svme),
	DIP(SrcNone | Prot | Priv,		skinit,		check_svme),
	DIP(SrcNone | Prot | Priv,		invlpga,	check_svme),
4439
};
4440

4441
static const struct opcode group7_rm7[] = {
4442
	N,
4443
	DIP(SrcNone, rdtscp, check_rdtsc),
4444 4445
	N, N, N, N, N, N,
};
4446

4447
static const struct opcode group1[] = {
4448 4449 4450 4451 4452 4453 4454 4455
	F(Lock, em_add),
	F(Lock | PageTable, em_or),
	F(Lock, em_adc),
	F(Lock, em_sbb),
	F(Lock | PageTable, em_and),
	F(Lock, em_sub),
	F(Lock, em_xor),
	F(NoWrite, em_cmp),
4456 4457
};

4458
static const struct opcode group1A[] = {
4459
	I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4460 4461
};

4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472
static const struct opcode group2[] = {
	F(DstMem | ModRM, em_rol),
	F(DstMem | ModRM, em_ror),
	F(DstMem | ModRM, em_rcl),
	F(DstMem | ModRM, em_rcr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_shr),
	F(DstMem | ModRM, em_shl),
	F(DstMem | ModRM, em_sar),
};

4473
static const struct opcode group3[] = {
4474 4475
	F(DstMem | SrcImm | NoWrite, em_test),
	F(DstMem | SrcImm | NoWrite, em_test),
4476 4477
	F(DstMem | SrcNone | Lock, em_not),
	F(DstMem | SrcNone | Lock, em_neg),
4478 4479
	F(DstXacc | Src2Mem, em_mul_ex),
	F(DstXacc | Src2Mem, em_imul_ex),
4480 4481
	F(DstXacc | Src2Mem, em_div_ex),
	F(DstXacc | Src2Mem, em_idiv_ex),
4482 4483
};

4484
static const struct opcode group4[] = {
4485 4486
	F(ByteOp | DstMem | SrcNone | Lock, em_inc),
	F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4487 4488 4489
	N, N, N, N, N, N,
};

4490
static const struct opcode group5[] = {
4491 4492
	F(DstMem | SrcNone | Lock,		em_inc),
	F(DstMem | SrcNone | Lock,		em_dec),
4493
	I(SrcMem | NearBranch,			em_call_near_abs),
4494
	I(SrcMemFAddr | ImplicitOps,		em_call_far),
4495
	I(SrcMem | NearBranch,			em_jmp_abs),
4496
	I(SrcMemFAddr | ImplicitOps,		em_jmp_far),
4497
	I(SrcMem | Stack | TwoMemOp,		em_push), D(Undefined),
4498 4499
};

4500
static const struct opcode group6[] = {
P
Paolo Bonzini 已提交
4501 4502
	II(Prot | DstMem,	   em_sldt, sldt),
	II(Prot | DstMem,	   em_str, str),
A
Avi Kivity 已提交
4503
	II(Prot | Priv | SrcMem16, em_lldt, lldt),
A
Avi Kivity 已提交
4504
	II(Prot | Priv | SrcMem16, em_ltr, ltr),
4505 4506 4507
	N, N, N, N,
};

4508
static const struct group_dual group7 = { {
4509 4510
	II(Mov | DstMem,			em_sgdt, sgdt),
	II(Mov | DstMem,			em_sidt, sidt),
4511 4512 4513 4514 4515
	II(SrcMem | Priv,			em_lgdt, lgdt),
	II(SrcMem | Priv,			em_lidt, lidt),
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	II(SrcMem | ByteOp | Priv | NoAccess,	em_invlpg, invlpg),
4516
}, {
4517
	EXT(0, group7_rm0),
4518
	EXT(0, group7_rm1),
4519 4520
	EXT(0, group7_rm2),
	EXT(0, group7_rm3),
4521 4522 4523
	II(SrcNone | DstMem | Mov,		em_smsw, smsw), N,
	II(SrcMem16 | Mov | Priv,		em_lmsw, lmsw),
	EXT(0, group7_rm7),
4524 4525
} };

4526
static const struct opcode group8[] = {
4527
	N, N, N, N,
4528 4529 4530 4531
	F(DstMem | SrcImmByte | NoWrite,		em_bt),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_bts),
	F(DstMem | SrcImmByte | Lock,			em_btr),
	F(DstMem | SrcImmByte | Lock | PageTable,	em_btc),
4532 4533
};

P
Paolo Bonzini 已提交
4534 4535 4536 4537 4538 4539 4540 4541 4542
/*
 * The "memory" destination is actually always a register, since we come
 * from the register case of group9.
 */
static const struct gprefix pfx_0f_c7_7 = {
	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
};


4543
static const struct group_dual group9 = { {
4544
	N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4545
}, {
P
Paolo Bonzini 已提交
4546 4547
	N, N, N, N, N, N, N,
	GP(0, &pfx_0f_c7_7),
4548 4549
} };

4550
static const struct opcode group11[] = {
4551
	I(DstMem | SrcImm | Mov | PageTable, em_mov),
4552
	X7(D(Undefined)),
4553 4554
};

4555
static const struct gprefix pfx_0f_ae_7 = {
4556
	I(SrcMem | ByteOp, em_clflush), N, N, N,
4557 4558 4559
};

static const struct group_dual group15 = { {
4560 4561 4562
	I(ModRM | Aligned16, em_fxsave),
	I(ModRM | Aligned16, em_fxrstor),
	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4563 4564 4565 4566
}, {
	N, N, N, N, N, N, N, N,
} };

4567
static const struct gprefix pfx_0f_6f_0f_7f = {
4568
	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4569 4570
};

4571 4572 4573 4574
static const struct instr_dual instr_dual_0f_2b = {
	I(0, em_mov), N
};

4575
static const struct gprefix pfx_0f_2b = {
4576
	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4577 4578
};

4579 4580 4581 4582
static const struct gprefix pfx_0f_10_0f_11 = {
	I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
};

4583
static const struct gprefix pfx_0f_28_0f_29 = {
4584
	I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4585 4586
};

4587 4588 4589 4590
static const struct gprefix pfx_0f_e7 = {
	N, I(Sse, em_mov), N, N,
};

4591
static const struct escape escape_d9 = { {
4592
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_db = { {
	N, N, N, N, N, N, N, N,
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

static const struct escape escape_dd = { {
4634
	N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653
}, {
	/* 0xC0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xC8 - 0xCF */
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xC7 */
	N, N, N, N, N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	N, N, N, N, N, N, N, N,
	/* 0xE8 - 0xEF */
	N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xF7 */
	N, N, N, N, N, N, N, N,
	/* 0xF8 - 0xFF */
	N, N, N, N, N, N, N, N,
} };

4654 4655 4656 4657
static const struct instr_dual instr_dual_0f_c3 = {
	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
};

4658 4659 4660 4661
static const struct mode_dual mode_dual_63 = {
	N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
};

4662
static const struct opcode opcode_table[256] = {
4663
	/* 0x00 - 0x07 */
4664
	F6ALU(Lock, em_add),
4665 4666
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4667
	/* 0x08 - 0x0F */
4668
	F6ALU(Lock | PageTable, em_or),
4669 4670
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
4671
	/* 0x10 - 0x17 */
4672
	F6ALU(Lock, em_adc),
4673 4674
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4675
	/* 0x18 - 0x1F */
4676
	F6ALU(Lock, em_sbb),
4677 4678
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4679
	/* 0x20 - 0x27 */
4680
	F6ALU(Lock | PageTable, em_and), N, N,
4681
	/* 0x28 - 0x2F */
4682
	F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4683
	/* 0x30 - 0x37 */
4684
	F6ALU(Lock, em_xor), N, N,
4685
	/* 0x38 - 0x3F */
4686
	F6ALU(NoWrite, em_cmp), N, N,
4687
	/* 0x40 - 0x4F */
4688
	X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4689
	/* 0x50 - 0x57 */
4690
	X8(I(SrcReg | Stack, em_push)),
4691
	/* 0x58 - 0x5F */
4692
	X8(I(DstReg | Stack, em_pop)),
4693
	/* 0x60 - 0x67 */
4694 4695
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
4696
	N, MD(ModRM, &mode_dual_63),
4697 4698
	N, N, N, N,
	/* 0x68 - 0x6F */
4699 4700
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4701 4702
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4703
	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4704
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4705
	/* 0x70 - 0x7F */
4706
	X16(D(SrcImmByte | NearBranch)),
4707
	/* 0x80 - 0x87 */
4708 4709 4710 4711
	G(ByteOp | DstMem | SrcImm, group1),
	G(DstMem | SrcImm, group1),
	G(ByteOp | DstMem | SrcImm | No64, group1),
	G(DstMem | SrcImmByte, group1),
4712
	F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4713
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4714
	/* 0x88 - 0x8F */
4715
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4716
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4717
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4718 4719 4720
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
4721
	/* 0x90 - 0x97 */
4722
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4723
	/* 0x98 - 0x9F */
4724
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4725
	I(SrcImmFAddr | No64, em_call_far), N,
4726
	II(ImplicitOps | Stack, em_pushf, pushf),
P
Paolo Bonzini 已提交
4727 4728
	II(ImplicitOps | Stack, em_popf, popf),
	I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4729
	/* 0xA0 - 0xA7 */
4730
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4731
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4732 4733
	I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
	F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4734
	/* 0xA8 - 0xAF */
4735
	F2bv(DstAcc | SrcImm | NoWrite, em_test),
4736 4737
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4738
	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4739
	/* 0xB0 - 0xB7 */
4740
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4741
	/* 0xB8 - 0xBF */
4742
	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4743
	/* 0xC0 - 0xC7 */
4744
	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4745 4746
	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | NearBranch, em_ret),
4747 4748
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4749
	G(ByteOp, group11), G(0, group11),
4750
	/* 0xC8 - 0xCF */
A
Avi Kivity 已提交
4751
	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4752 4753
	I(ImplicitOps | SrcImmU16, em_ret_far_imm),
	I(ImplicitOps, em_ret_far),
4754
	D(ImplicitOps), DI(SrcImmByte, intn),
4755
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4756
	/* 0xD0 - 0xD7 */
4757 4758
	G(Src2One | ByteOp, group2), G(Src2One, group2),
	G(Src2CL | ByteOp, group2), G(Src2CL, group2),
P
Paolo Bonzini 已提交
4759
	I(DstAcc | SrcImmUByte | No64, em_aam),
P
Paolo Bonzini 已提交
4760 4761
	I(DstAcc | SrcImmUByte | No64, em_aad),
	F(DstAcc | ByteOp | No64, em_salc),
P
Paolo Bonzini 已提交
4762
	I(DstAcc | SrcXLat | ByteOp, em_mov),
4763
	/* 0xD8 - 0xDF */
4764
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4765
	/* 0xE0 - 0xE7 */
4766 4767
	X3(I(SrcImmByte | NearBranch, em_loop)),
	I(SrcImmByte | NearBranch, em_jcxz),
4768 4769
	I2bvIP(SrcImmUByte | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4770
	/* 0xE8 - 0xEF */
4771 4772 4773
	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
	I(SrcImmFAddr | No64, em_jmp_far),
	D(SrcImmByte | ImplicitOps | NearBranch),
4774 4775
	I2bvIP(SrcDX | DstAcc, em_in,  in,  check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4776
	/* 0xF0 - 0xF7 */
4777
	N, DI(ImplicitOps, icebp), N, N,
4778 4779
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
4780
	/* 0xF8 - 0xFF */
4781 4782
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4783 4784 4785
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};

4786
static const struct opcode twobyte_table[256] = {
4787
	/* 0x00 - 0x0F */
4788
	G(0, group6), GD(0, &group7), N, N,
4789
	N, I(ImplicitOps | EmulateOnUD, em_syscall),
4790
	II(ImplicitOps | Priv, em_clts, clts), N,
4791
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4792
	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4793
	/* 0x10 - 0x1F */
4794 4795 4796
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
	N, N, N, N, N, N,
4797 4798
	D(ImplicitOps | ModRM | SrcMem | NoAccess),
	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4799
	/* 0x20 - 0x2F */
4800 4801 4802 4803 4804 4805
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
						check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
						check_dr_write),
4806
	N, N, N, N,
4807 4808
	GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
	GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4809
	N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4810
	N, N, N, N,
4811
	/* 0x30 - 0x3F */
4812
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4813
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4814
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4815
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4816 4817
	I(ImplicitOps | EmulateOnUD, em_sysenter),
	I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4818
	N, N,
4819 4820
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
4821
	X16(D(DstReg | SrcMem | ModRM)),
4822 4823 4824
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
4825 4826 4827 4828
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4829
	/* 0x70 - 0x7F */
4830 4831 4832 4833
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4834
	/* 0x80 - 0x8F */
4835
	X16(D(SrcImm | NearBranch)),
4836
	/* 0x90 - 0x9F */
4837
	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4838
	/* 0xA0 - 0xA7 */
4839
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4840 4841
	II(ImplicitOps, em_cpuid, cpuid),
	F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4842 4843
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4844
	/* 0xA8 - 0xAF */
4845
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4846
	II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4847
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4848 4849
	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4850
	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4851
	/* 0xB0 - 0xB7 */
4852
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4853
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4854
	F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4855 4856
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4857
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4858 4859
	/* 0xB8 - 0xBF */
	N, N,
4860
	G(BitOp, group8),
4861
	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4862 4863
	I(DstReg | SrcMem | ModRM, em_bsf_c),
	I(DstReg | SrcMem | ModRM, em_bsr_c),
4864
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
A
Avi Kivity 已提交
4865
	/* 0xC0 - 0xC7 */
4866
	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4867
	N, ID(0, &instr_dual_0f_c3),
4868
	N, N, N, GD(0, &group9),
A
Avi Kivity 已提交
4869 4870
	/* 0xC8 - 0xCF */
	X8(I(DstReg, em_bswap)),
4871 4872 4873
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
4874 4875
	N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
	N, N, N, N, N, N, N, N,
4876 4877 4878 4879
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};

4880 4881 4882 4883 4884 4885 4886 4887
static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

4888
static const struct gprefix three_byte_0f_38_f0 = {
4889
	ID(0, &instr_dual_0f_38_f0), N, N, N
4890 4891 4892
};

static const struct gprefix three_byte_0f_38_f1 = {
4893
	ID(0, &instr_dual_0f_38_f1), N, N, N
4894 4895 4896 4897 4898 4899 4900 4901 4902
};

/*
 * Insns below are selected by the prefix which indexed by the third opcode
 * byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
B
Borislav Petkov 已提交
4903 4904 4905
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
4906 4907
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
B
Borislav Petkov 已提交
4908 4909
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
4910 4911
};

4912 4913 4914 4915 4916
#undef D
#undef N
#undef G
#undef GD
#undef I
4917
#undef GP
4918
#undef EXT
4919
#undef MD
N
Nadav Amit 已提交
4920
#undef ID
4921

4922
#undef D2bv
4923
#undef D2bvIP
4924
#undef I2bv
4925
#undef I2bvIP
4926
#undef I6ALU
4927

4928
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4929 4930 4931
{
	unsigned size;

4932
	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944
	if (size == 8)
		size = 4;
	return size;
}

static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
4945
	op->addr.mem.ea = ctxt->_eip;
4946 4947 4948
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
4949
		op->val = insn_fetch(s8, ctxt);
4950 4951
		break;
	case 2:
4952
		op->val = insn_fetch(s16, ctxt);
4953 4954
		break;
	case 4:
4955
		op->val = insn_fetch(s32, ctxt);
4956
		break;
4957 4958 4959
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}

4978 4979 4980 4981 4982 4983 4984
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
4985
		decode_register_operand(ctxt, op);
4986 4987
		break;
	case OpImmUByte:
4988
		rc = decode_imm(ctxt, op, 1, false);
4989 4990
		break;
	case OpMem:
4991
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4992 4993 4994
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
4995
		if (ctxt->d & BitOp)
4996 4997 4998
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
4999
	case OpMem64:
5000
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
5001
		goto mem_common;
5002 5003 5004
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
5005
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
5006 5007 5008
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
5027 5028 5029 5030
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
5031
			register_address(ctxt, VCPU_REGS_RDI);
5032 5033
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
5034
		op->count = 1;
5035 5036 5037 5038
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
5039
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
5040 5041
		fetch_register_operand(op);
		break;
5042
	case OpCL:
5043
		op->type = OP_IMM;
5044
		op->bytes = 1;
5045
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
5046 5047 5048 5049 5050
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
5051
		op->type = OP_IMM;
5052 5053 5054 5055 5056 5057
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
5058 5059 5060
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
5061 5062
	case OpMem8:
		ctxt->memop.bytes = 1;
5063
		if (ctxt->memop.type == OP_REG) {
5064 5065
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
5066 5067
			fetch_register_operand(&ctxt->memop);
		}
5068
		goto mem_common;
5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
5085
			register_address(ctxt, VCPU_REGS_RSI);
B
Bandan Das 已提交
5086
		op->addr.mem.seg = ctxt->seg_override;
5087
		op->val = 0;
5088
		op->count = 1;
5089
		break;
P
Paolo Bonzini 已提交
5090 5091 5092 5093
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
5094
			address_mask(ctxt,
P
Paolo Bonzini 已提交
5095 5096
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
B
Bandan Das 已提交
5097
		op->addr.mem.seg = ctxt->seg_override;
P
Paolo Bonzini 已提交
5098 5099
		op->val = 0;
		break;
5100 5101 5102 5103 5104 5105 5106 5107 5108
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
5109
	case OpES:
5110
		op->type = OP_IMM;
5111 5112 5113
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
5114
		op->type = OP_IMM;
5115 5116 5117
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
5118
		op->type = OP_IMM;
5119 5120 5121
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
5122
		op->type = OP_IMM;
5123 5124 5125
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
5126
		op->type = OP_IMM;
5127 5128 5129
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
5130
		op->type = OP_IMM;
5131 5132
		op->val = VCPU_SREG_GS;
		break;
5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}

5144
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
5145 5146 5147
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
5148
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
5149
	bool op_prefix = false;
B
Bandan Das 已提交
5150
	bool has_seg_override = false;
5151
	struct opcode opcode;
5152 5153
	u16 dummy;
	struct desc_struct desc;
5154

5155 5156
	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
5157
	ctxt->_eip = ctxt->eip;
5158 5159
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
B
Borislav Petkov 已提交
5160
	ctxt->opcode_len = 1;
5161
	if (insn_len > 0)
5162
		memcpy(ctxt->fetch.data, insn, insn_len);
5163
	else {
5164
		rc = __do_insn_fetch_bytes(ctxt, 1);
5165
		if (rc != X86EMUL_CONTINUE)
5166
			goto done;
5167
	}
5168 5169 5170 5171

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
5172 5173 5174 5175 5176
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
5190
		return EMULATION_FAILED;
5191 5192
	}

5193 5194
	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;
5195 5196 5197

	/* Legacy prefixes. */
	for (;;) {
5198
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
5199
		case 0x66:	/* operand-size override */
5200
			op_prefix = true;
5201
			/* switch between 2/4 bytes */
5202
			ctxt->op_bytes = def_op_bytes ^ 6;
5203 5204 5205 5206
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
5207
				ctxt->ad_bytes = def_ad_bytes ^ 12;
5208 5209
			else
				/* switch between 2/4 bytes */
5210
				ctxt->ad_bytes = def_ad_bytes ^ 6;
5211 5212 5213 5214 5215
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
B
Bandan Das 已提交
5216 5217
			has_seg_override = true;
			ctxt->seg_override = (ctxt->b >> 3) & 3;
5218 5219 5220
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
B
Bandan Das 已提交
5221 5222
			has_seg_override = true;
			ctxt->seg_override = ctxt->b & 7;
5223 5224 5225 5226
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
5227
			ctxt->rex_prefix = ctxt->b;
5228 5229
			continue;
		case 0xf0:	/* LOCK */
5230
			ctxt->lock_prefix = 1;
5231 5232 5233
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
5234
			ctxt->rep_prefix = ctxt->b;
5235 5236 5237 5238 5239 5240 5241
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

5242
		ctxt->rex_prefix = 0;
5243 5244 5245 5246 5247
	}

done_prefixes:

	/* REX prefix. */
5248 5249
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
5250 5251

	/* Opcode byte(s). */
5252
	opcode = opcode_table[ctxt->b];
5253
	/* Two-byte opcode? */
5254
	if (ctxt->b == 0x0f) {
B
Borislav Petkov 已提交
5255
		ctxt->opcode_len = 2;
5256
		ctxt->b = insn_fetch(u8, ctxt);
5257
		opcode = twobyte_table[ctxt->b];
5258 5259 5260 5261 5262 5263 5264

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
5265
	}
5266
	ctxt->d = opcode.flags;
5267

5268 5269 5270
	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

5271 5272
	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
5273
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
5274 5275 5276
		ctxt->d = NotImpl;
	}

5277 5278
	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
5279
		case Group:
5280
			goffset = (ctxt->modrm >> 3) & 7;
5281 5282 5283
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
5284 5285
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
5286 5287 5288 5289 5290
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
5291
			goffset = ctxt->modrm & 7;
5292
			opcode = opcode.u.group[goffset];
5293 5294
			break;
		case Prefix:
5295
			if (ctxt->rep_prefix && op_prefix)
5296
				return EMULATION_FAILED;
5297
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
5298 5299 5300 5301 5302 5303 5304
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
5305 5306 5307 5308 5309 5310
		case Escape:
			if (ctxt->modrm > 0xbf)
				opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
			else
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			break;
5311 5312 5313 5314 5315 5316
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
5317 5318 5319 5320 5321 5322
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
5323
		default:
5324
			return EMULATION_FAILED;
5325
		}
5326

5327
		ctxt->d &= ~(u64)GroupMask;
5328
		ctxt->d |= opcode.flags;
5329 5330
	}

5331 5332 5333 5334
	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

5335
	ctxt->execute = opcode.u.execute;
5336

5337 5338 5339
	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

5340
	if (unlikely(ctxt->d &
5341 5342
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
5343 5344 5345 5346 5347 5348
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;
5349

5350 5351
		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;
5352

5353 5354 5355 5356 5357 5358
		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}
5359

5360 5361 5362 5363 5364 5365 5366
		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

5367 5368 5369
		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

5370 5371 5372 5373 5374
		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}
A
Avi Kivity 已提交
5375

5376
	/* ModRM and SIB bytes. */
5377
	if (ctxt->d & ModRM) {
5378
		rc = decode_modrm(ctxt, &ctxt->memop);
B
Bandan Das 已提交
5379 5380 5381 5382
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
5383
	} else if (ctxt->d & MemAbs)
5384
		rc = decode_abs(ctxt, &ctxt->memop);
5385 5386 5387
	if (rc != X86EMUL_CONTINUE)
		goto done;

B
Bandan Das 已提交
5388 5389
	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;
5390

B
Bandan Das 已提交
5391
	ctxt->memop.addr.mem.seg = ctxt->seg_override;
5392 5393 5394 5395 5396

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
5397
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5398 5399 5400
	if (rc != X86EMUL_CONTINUE)
		goto done;

5401 5402 5403 5404
	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
5405
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5406 5407 5408
	if (rc != X86EMUL_CONTINUE)
		goto done;

5409
	/* Decode and fetch the destination operand: register or memory. */
5410
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5411

5412
	if (ctxt->rip_relative && likely(ctxt->memopp))
5413 5414
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);
5415

5416
done:
5417 5418
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
5419
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5420 5421
}

5422 5423 5424 5425 5426
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

5427 5428 5429 5430 5431 5432 5433 5434 5435
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/* The second termination condition only applies for REPE
	 * and REPNE. Test if the repeat string operation prefix is
	 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
	 * corresponding termination condition according to:
	 * 	- if REPE/REPZ and ZF = 0 then done
	 * 	- if REPNE/REPNZ and ZF = 1 then done
	 */
5436 5437 5438
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
5439
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5440
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
5441
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5442 5443 5444 5445 5446
		return true;

	return false;
}

A
Avi Kivity 已提交
5447 5448
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
R
Radim Krčmář 已提交
5449
	int rc;
A
Avi Kivity 已提交
5450

R
Radim Krčmář 已提交
5451
	rc = asm_safe("fwait");
A
Avi Kivity 已提交
5452

R
Radim Krčmář 已提交
5453
	if (unlikely(rc != X86EMUL_CONTINUE))
A
Avi Kivity 已提交
5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}

static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
				       struct operand *op)
{
	if (op->type == OP_MM)
		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}

5466 5467 5468
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5469

5470 5471
	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5472

5473
	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5474
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5475
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5476
	    : "c"(ctxt->src2.val));
5477

5478
	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5479 5480
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
5481 5482
	return X86EMUL_CONTINUE;
}
5483

5484 5485
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
B
Bandan Das 已提交
5486 5487
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
5488 5489 5490 5491 5492 5493

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}

5494
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
5495
{
5496
	const struct x86_emulate_ops *ops = ctxt->ops;
5497
	int rc = X86EMUL_CONTINUE;
5498
	int saved_dst_type = ctxt->dst.type;
5499
	unsigned emul_flags;
5500

5501
	ctxt->mem_read.pos = 0;
5502

5503 5504
	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5505
		rc = emulate_ud(ctxt);
5506 5507 5508
		goto done;
	}

5509
	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5510
		rc = emulate_ud(ctxt);
5511 5512 5513
		goto done;
	}

5514
	emul_flags = ctxt->ops->get_hflags(ctxt);
5515 5516 5517 5518 5519 5520 5521
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
				(ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}
A
Avi Kivity 已提交
5522

5523 5524 5525
		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
A
Avi Kivity 已提交
5526
			goto done;
5527
		}
A
Avi Kivity 已提交
5528

5529 5530
		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
5531
			goto done;
5532
		}
5533

5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546
		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(ctxt, &ctxt->src);
			fetch_possible_mmx_operand(ctxt, &ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(ctxt, &ctxt->dst);
		}
5547

5548
		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
5549 5550 5551 5552 5553
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}
5554

5555 5556 5557 5558 5559 5560
		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

5561 5562
		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5563 5564 5565 5566
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
5567
			goto done;
5568
		}
5569

5570
		/* Do instruction specific permission checks */
5571
		if (ctxt->d & CheckPerm) {
5572 5573 5574 5575 5576
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

5577
		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5578 5579 5580 5581 5582 5583 5584 5585 5586
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5587
				string_registers_quirk(ctxt);
5588
				ctxt->eip = ctxt->_eip;
5589
				ctxt->eflags &= ~X86_EFLAGS_RF;
5590 5591
				goto done;
			}
5592 5593 5594
		}
	}

5595 5596 5597
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
5598
		if (rc != X86EMUL_CONTINUE)
5599
			goto done;
5600
		ctxt->src.orig_val64 = ctxt->src.val64;
5601 5602
	}

5603 5604 5605
	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
5606 5607 5608 5609
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

5610
	if ((ctxt->d & DstMask) == ImplicitOps)
5611 5612 5613
		goto special_insn;


5614
	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5615
		/* optimisation - avoid slow emulated read if Mov */
5616 5617
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				   &ctxt->dst.val, ctxt->dst.bytes);
5618
		if (rc != X86EMUL_CONTINUE) {
5619 5620
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
5621 5622
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
5623
			goto done;
5624
		}
5625
	}
5626 5627
	/* Copy full 64-bit value for CMPXCHG8B.  */
	ctxt->dst.orig_val64 = ctxt->dst.val64;
5628

5629 5630
special_insn:

5631
	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
5632
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
5633
					      X86_ICPT_POST_MEMACCESS);
5634 5635 5636 5637
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

5638
	if (ctxt->rep_prefix && (ctxt->d & String))
5639
		ctxt->eflags |= X86_EFLAGS_RF;
5640
	else
5641
		ctxt->eflags &= ~X86_EFLAGS_RF;
5642

5643
	if (ctxt->execute) {
5644 5645 5646 5647 5648 5649 5650
		if (ctxt->d & Fastop) {
			void (*fop)(struct fastop *) = (void *)ctxt->execute;
			rc = fastop(ctxt, fop);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			goto writeback;
		}
5651
		rc = ctxt->execute(ctxt);
5652 5653 5654 5655 5656
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

B
Borislav Petkov 已提交
5657
	if (ctxt->opcode_len == 2)
A
Avi Kivity 已提交
5658
		goto twobyte_insn;
5659 5660
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;
A
Avi Kivity 已提交
5661

5662
	switch (ctxt->b) {
5663
	case 0x70 ... 0x7f: /* jcc (short) */
5664
		if (test_cc(ctxt->b, ctxt->eflags))
5665
			rc = jmp_rel(ctxt, ctxt->src.val);
5666
		break;
N
Nitin A Kamble 已提交
5667
	case 0x8d: /* lea r16/r32, m */
5668
		ctxt->dst.val = ctxt->src.addr.mem.ea;
N
Nitin A Kamble 已提交
5669
		break;
5670
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
5671
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5672 5673 5674
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
5675
		break;
5676
	case 0x98: /* cbw/cwde/cdqe */
5677 5678 5679 5680
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5681 5682
		}
		break;
5683
	case 0xcc:		/* int3 */
5684 5685
		rc = emulate_int(ctxt, 3);
		break;
5686
	case 0xcd:		/* int n */
5687
		rc = emulate_int(ctxt, ctxt->src.val);
5688 5689
		break;
	case 0xce:		/* into */
5690
		if (ctxt->eflags & X86_EFLAGS_OF)
5691
			rc = emulate_int(ctxt, 4);
5692
		break;
5693
	case 0xe9: /* jmp rel */
5694
	case 0xeb: /* jmp rel short */
5695
		rc = jmp_rel(ctxt, ctxt->src.val);
5696
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
5697
		break;
5698
	case 0xf4:              /* hlt */
5699
		ctxt->ops->halt(ctxt);
5700
		break;
5701 5702
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
5703
		ctxt->eflags ^= X86_EFLAGS_CF;
5704 5705
		break;
	case 0xf8: /* clc */
5706
		ctxt->eflags &= ~X86_EFLAGS_CF;
5707
		break;
5708
	case 0xf9: /* stc */
5709
		ctxt->eflags |= X86_EFLAGS_CF;
5710
		break;
5711
	case 0xfc: /* cld */
5712
		ctxt->eflags &= ~X86_EFLAGS_DF;
5713 5714
		break;
	case 0xfd: /* std */
5715
		ctxt->eflags |= X86_EFLAGS_DF;
5716
		break;
5717 5718
	default:
		goto cannot_emulate;
A
Avi Kivity 已提交
5719
	}
5720

5721 5722 5723
	if (rc != X86EMUL_CONTINUE)
		goto done;

5724
writeback:
5725 5726 5727 5728 5729 5730
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
5731 5732 5733 5734 5735
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
5736

5737 5738 5739 5740
	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instruction )
	 */
5741
	ctxt->dst.type = saved_dst_type;
5742

5743
	if ((ctxt->d & SrcMask) == SrcSI)
5744
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5745

5746
	if ((ctxt->d & DstMask) == DstDI)
5747
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5748

5749
	if (ctxt->rep_prefix && (ctxt->d & String)) {
5750
		unsigned int count;
5751
		struct read_cache *r = &ctxt->io_read;
5752 5753 5754 5755
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
5756
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5757

5758 5759 5760 5761 5762
		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iteration.
			 */
5763
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5764 5765 5766 5767 5768 5769
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
5770
				ctxt->mem_read.end = 0;
5771
				writeback_registers(ctxt);
5772 5773 5774
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
5775
		}
5776
		ctxt->eflags &= ~X86_EFLAGS_RF;
5777
	}
5778

5779
	ctxt->eip = ctxt->_eip;
5780 5781

done:
5782 5783
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
5784
		ctxt->have_exception = true;
5785
	}
5786 5787 5788
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

5789 5790 5791
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

5792
	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
A
Avi Kivity 已提交
5793 5794

twobyte_insn:
5795
	switch (ctxt->b) {
5796
	case 0x09:		/* wbinvd */
5797
		(ctxt->ops->wbinvd)(ctxt);
5798 5799
		break;
	case 0x08:		/* invd */
5800 5801
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
P
Paolo Bonzini 已提交
5802
	case 0x1f:		/* nop */
5803 5804
		break;
	case 0x20: /* mov cr, reg */
5805
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5806
		break;
A
Avi Kivity 已提交
5807
	case 0x21: /* mov from dr to reg */
5808
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
A
Avi Kivity 已提交
5809 5810
		break;
	case 0x40 ... 0x4f:	/* cmov */
5811 5812
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
5813
		else if (ctxt->op_bytes != 4)
5814
			ctxt->dst.type = OP_NONE; /* no writeback */
A
Avi Kivity 已提交
5815
		break;
5816
	case 0x80 ... 0x8f: /* jnz rel, etc*/
5817
		if (test_cc(ctxt->b, ctxt->eflags))
5818
			rc = jmp_rel(ctxt, ctxt->src.val);
5819
		break;
5820
	case 0x90 ... 0x9f:     /* setcc r/m8 */
5821
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5822
		break;
A
Avi Kivity 已提交
5823
	case 0xb6 ... 0xb7:	/* movzx */
5824
		ctxt->dst.bytes = ctxt->op_bytes;
5825
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5826
						       : (u16) ctxt->src.val;
A
Avi Kivity 已提交
5827 5828
		break;
	case 0xbe ... 0xbf:	/* movsx */
5829
		ctxt->dst.bytes = ctxt->op_bytes;
5830
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5831
							(s16) ctxt->src.val;
A
Avi Kivity 已提交
5832
		break;
5833 5834
	default:
		goto cannot_emulate;
A
Avi Kivity 已提交
5835
	}
5836

5837 5838
threebyte_insn:

5839 5840 5841
	if (rc != X86EMUL_CONTINUE)
		goto done;

A
Avi Kivity 已提交
5842 5843 5844
	goto writeback;

cannot_emulate:
5845
	return EMULATION_FAILED;
A
Avi Kivity 已提交
5846
}
5847 5848 5849 5850 5851 5852 5853 5854 5855 5856

void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867

bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)
		return false;

	return true;
}