/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#ifndef __KERNEL__
#include <stdio.h>
#include <stdint.h>
#include <public/xen.h>
#define DPRINTF(_f, _a ...) printf(_f , ## _a)
#else
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#define DPRINTF(x...) do {} while (0)
#endif
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */
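
/*
 * As an illustration of how the tables below are read (an example, not an
 * exhaustive description): the entry for opcode 0x88 (mov r/m8, r8) is
 * ByteOp | DstMem | SrcReg | ModRM | Mov, i.e. a byte-sized, write-only
 * move whose destination is named by the ModRM r/m field and whose source
 * is the register named by the ModRM reg field.
 */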

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)	/* 8-bit operands. */
/* Destination operand type. */
#define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
#define DstReg      (2<<1)	/* Register operand. */
#define DstMem      (3<<1)	/* Memory operand. */
#define DstAcc      (4<<1)      /* Destination Accumulator */
#define DstMask     (7<<1)
/* Source operand type. */
#define SrcNone     (0<<4)	/* No source operand. */
#define SrcImplicit (0<<4)	/* Source operand is implicit in the opcode. */
#define SrcReg      (1<<4)	/* Register operand. */
#define SrcMem      (2<<4)	/* Memory operand. */
#define SrcMem16    (3<<4)	/* Memory operand (16-bit). */
#define SrcMem32    (4<<4)	/* Memory operand (32-bit). */
#define SrcImm      (5<<4)	/* Immediate operand. */
#define SrcImmByte  (6<<4)	/* 8-bit sign-extended immediate operand. */
#define SrcOne      (7<<4)	/* Implied '1' */
#define SrcImmUByte (8<<4)      /* 8-bit unsigned immediate operand. */
#define SrcImmU     (9<<4)      /* Immediate operand, unsigned */
#define SrcMask     (0xf<<4)
/* Generic ModRM decode. */
#define ModRM       (1<<8)
/* Destination is only written; never read. */
#define Mov         (1<<9)
#define BitOp       (1<<10)
#define MemAbs      (1<<11)      /* Memory operand is absolute displacement */
#define String      (1<<12)     /* String instruction (rep capable) */
#define Stack       (1<<13)     /* Stack instruction (push/pop) */
#define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
#define GroupMask   0xff        /* Group number stored in bits 0:7 */
/* Misc flags */
#define Lock        (1<<26) /* lock prefix is allowed for the instruction */
#define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64	    (1<<28)
/* Source 2 operand type */
#define Src2None    (0<<29)
#define Src2CL      (1<<29)
#define Src2ImmByte (2<<29)
#define Src2One     (3<<29)
#define Src2Imm16   (4<<29)
#define Src2Mem16   (5<<29) /* Used for Ep encoding. First argument has to be
			       in memory and second argument is located
			       immediately after the first one in memory. */
#define Src2Mask    (7<<29)

enum {
	Group1_80, Group1_81, Group1_82, Group1_83,
	Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
	Group8, Group9,
};

static u32 opcode_table[256] = {
	/* 0x00 - 0x07 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x08 - 0x0F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, 0,
	/* 0x10 - 0x17 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x18 - 0x1F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	/* 0x20 - 0x27 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
	/* 0x28 - 0x2F */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x30 - 0x37 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	0, 0, 0, 0,
	/* 0x38 - 0x3F */
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
	ByteOp | DstAcc | SrcImm, DstAcc | SrcImm,
	0, 0,
	/* 0x40 - 0x47 */
	DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
	/* 0x48 - 0x4F */
	DstReg, DstReg, DstReg, DstReg,	DstReg, DstReg, DstReg, DstReg,
	/* 0x50 - 0x57 */
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack,
	/* 0x58 - 0x5F */
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack,
	/* 0x60 - 0x67 */
	ImplicitOps | Stack | No64, ImplicitOps | Stack | No64,
	0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
	0, 0, 0, 0,
	/* 0x68 - 0x6F */
	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
	SrcNone  | ByteOp  | ImplicitOps, SrcNone  | ImplicitOps, /* insb, insw/insd */
	SrcNone  | ByteOp  | ImplicitOps, SrcNone  | ImplicitOps, /* outsb, outsw/outsd */
	/* 0x70 - 0x77 */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x78 - 0x7F */
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
	/* 0x80 - 0x87 */
	Group | Group1_80, Group | Group1_81,
	Group | Group1_82, Group | Group1_83,
	ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	/* 0x88 - 0x8F */
	ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
	ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
	DstReg | SrcMem | ModRM | Mov, Group | Group1A,
	/* 0x90 - 0x97 */
	DstReg, DstReg, DstReg, DstReg,	DstReg, DstReg, DstReg, DstReg,
	/* 0x98 - 0x9F */
	0, 0, SrcImm | Src2Imm16 | No64, 0,
	ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
	/* 0xA0 - 0xA7 */
	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
	ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
	ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
	ByteOp | ImplicitOps | String, ImplicitOps | String,
	/* 0xA8 - 0xAF */
	0, 0, ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
	ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
	ByteOp | ImplicitOps | String, ImplicitOps | String,
	/* 0xB0 - 0xB7 */
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
	/* 0xB8 - 0xBF */
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	DstReg | SrcImm | Mov, DstReg | SrcImm | Mov,
	/* 0xC0 - 0xC7 */
	ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
	0, ImplicitOps | Stack, 0, 0,
	ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
	/* 0xC8 - 0xCF */
	0, 0, 0, ImplicitOps | Stack,
	ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps,
	/* 0xD0 - 0xD7 */
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM,
	0, 0, 0, 0,
	/* 0xD8 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xE7 */
	0, 0, 0, 0,
	ByteOp | SrcImmUByte, SrcImmUByte,
	ByteOp | SrcImmUByte, SrcImmUByte,
	/* 0xE8 - 0xEF */
	SrcImm | Stack, SrcImm | ImplicitOps,
	SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
	/* 0xF0 - 0xF7 */
	0, 0, 0, 0,
	ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
	/* 0xF8 - 0xFF */
	ImplicitOps, 0, ImplicitOps, ImplicitOps,
	ImplicitOps, ImplicitOps, Group | Group4, Group | Group5,
};

static u32 twobyte_table[256] = {
	/* 0x00 - 0x0F */
	0, Group | GroupDual | Group7, 0, 0,
	0, ImplicitOps, ImplicitOps | Priv, 0,
	ImplicitOps | Priv, ImplicitOps | Priv, 0, 0,
	0, ImplicitOps | ModRM, 0, 0,
	/* 0x10 - 0x1F */
	0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
	/* 0x20 - 0x2F */
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	ModRM | ImplicitOps | Priv, ModRM | Priv,
	0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x30 - 0x3F */
	ImplicitOps | Priv, 0, ImplicitOps | Priv, 0,
	ImplicitOps, ImplicitOps | Priv, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x40 - 0x47 */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x48 - 0x4F */
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
	/* 0x50 - 0x5F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x60 - 0x6F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x70 - 0x7F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0x80 - 0x8F */
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm,
	/* 0x90 - 0x9F */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xA0 - 0xA7 */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM, 0, 0,
	/* 0xA8 - 0xAF */
	ImplicitOps | Stack, ImplicitOps | Stack,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	DstMem | SrcReg | Src2ImmByte | ModRM,
	DstMem | SrcReg | Src2CL | ModRM,
	ModRM, 0,
	/* 0xB0 - 0xB7 */
	ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
	0, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	    DstReg | SrcMem16 | ModRM | Mov,
	/* 0xB8 - 0xBF */
	0, 0,
	Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock,
	0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov,
	    DstReg | SrcMem16 | ModRM | Mov,
	/* 0xC0 - 0xCF */
	0, 0, 0, DstMem | SrcReg | ModRM | Mov,
	0, 0, 0, Group | GroupDual | Group9,
	0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xD0 - 0xDF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xE0 - 0xEF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 0xF0 - 0xFF */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

static u32 group_table[] = {
	[Group1_80*8] =
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM | Lock,
	ByteOp | DstMem | SrcImm | ModRM,
	[Group1_81*8] =
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM | Lock,
	DstMem | SrcImm | ModRM,
	[Group1_82*8] =
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64 | Lock,
	ByteOp | DstMem | SrcImm | ModRM | No64,
	[Group1_83*8] =
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM,
	[Group1A*8] =
	DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
	[Group3_Byte*8] =
	ByteOp | SrcImm | DstMem | ModRM, 0,
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group3*8] =
	DstMem | SrcImm | ModRM, 0,
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	0, 0, 0, 0,
	[Group4*8] =
	ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
	0, 0, 0, 0, 0, 0,
	[Group5*8] =
	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
	SrcMem | ModRM | Stack, 0,
	SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps,
	SrcMem | ModRM | Stack, 0,
	[Group7*8] =
	0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv,
	[Group8*8] =
	0, 0, 0, 0,
	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
	DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
	[Group9*8] =
	0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};

static u32 group2_table[] = {
	[Group7*8] =
	SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
	SrcNone | ModRM | DstMem | Mov, 0,
	SrcMem16 | ModRM | Mov | Priv, 0,
	[Group9*8] =
	0, 0, 0, 0, 0, 0, 0, 0,
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */
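
/*
 * Rough usage sketch (the emulate_2op_SrcV() helper is defined further
 * down; "add" is only an illustrative mnemonic here):
 *
 *	emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
 *
 * executes a real ADD of the decoded source into the decoded destination
 * and folds the resulting arithmetic flags (the EFLAGS_MASK bits) back
 * into the guest's saved EFLAGS image.
 */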

#if defined(CONFIG_X86_64)
#define _LO32 "k"		/* force 32-bit operand */
#define _STK  "%%rsp"		/* stack pointer */
#elif defined(__i386__)
#define _LO32 ""		/* force 32-bit operand */
#define _STK  "%%esp"		/* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)					\
	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
	"movl %"_sav",%"_LO32 _tmp"; "                                  \
	"push %"_tmp"; "                                                \
	"push %"_tmp"; "                                                \
	"movl %"_msk",%"_LO32 _tmp"; "                                  \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"pushf; "                                                       \
	"notl %"_LO32 _tmp"; "                                          \
	"andl %"_LO32 _tmp",("_STK"); "                                 \
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
	"pop  %"_tmp"; "                                                \
	"orl  %"_LO32 _tmp",("_STK"); "                                 \
	"popf; "                                                        \
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	/* _sav |= EFLAGS & _msk; */		\
	"pushf; "				\
	"pop  %"_tmp"; "			\
	"andl %"_msk",%"_LO32 _tmp"; "		\
	"orl  %"_LO32 _tmp",%"_sav"; "

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix)	\
	do {								\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "4", "2")			\
			_op _suffix " %"_x"3,%1; "			\
			_POST_EFLAGS("0", "4", "2")			\
			: "=m" (_eflags), "=m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: _y ((_src).val), "i" (EFLAGS_MASK));		\
	} while (0)


/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								\
		unsigned long _tmp;					\
									\
		switch ((_dst).bytes) {					\
		case 2:							\
			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
			break;						\
		case 4:							\
			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
			break;						\
		case 8:							\
			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
			break;						\
		}							\
	} while (0)

#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	do {								     \
		unsigned long _tmp;					     \
		switch ((_dst).bytes) {				             \
		case 1:							     \
			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b");  \
			break;						     \
		default:						     \
			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
					     _wx, _wy, _lx, _ly, _qx, _qy);  \
			break;						     \
		}							     \
	} while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(_op, _src, _dst, _eflags)                      \
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(_op, _src, _dst, _eflags)                      \
	__emulate_2op(_op, _src, _dst, _eflags,				\
		      "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)               \
	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
			     "w", "r", _LO32, "r", "", "r")

/* Instruction has three operands; one operand is stored in the ECX register */
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) 	\
	do {									\
		unsigned long _tmp;						\
		_type _clv  = (_cl).val;  					\
		_type _srcv = (_src).val;    					\
		_type _dstv = (_dst).val;					\
										\
		__asm__ __volatile__ (						\
			_PRE_EFLAGS("0", "5", "2")				\
			_op _suffix " %4,%1 \n"					\
			_POST_EFLAGS("0", "5", "2")				\
			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)		\
			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)		\
			); 							\
										\
		(_cl).val  = (unsigned long) _clv;				\
		(_src).val = (unsigned long) _srcv;				\
		(_dst).val = (unsigned long) _dstv;				\
	} while (0)

#define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)				\
	do {									\
		switch ((_dst).bytes) {						\
		case 2:								\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,  	\
						"w", unsigned short);         	\
			break;							\
		case 4: 							\
			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,  	\
						"l", unsigned int);           	\
			break;							\
		case 8:								\
			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
						"q", unsigned long));  		\
			break;							\
		}								\
	} while (0)

#define __emulate_1op(_op, _dst, _eflags, _suffix)			\
	do {								\
		unsigned long _tmp;					\
									\
		__asm__ __volatile__ (					\
			_PRE_EFLAGS("0", "3", "2")			\
			_op _suffix " %1; "				\
			_POST_EFLAGS("0", "3", "2")			\
			: "=m" (_eflags), "+m" ((_dst).val),		\
			  "=&r" (_tmp)					\
			: "i" (EFLAGS_MASK));				\
	} while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(_op, _dst, _eflags)                                    \
	do {								\
		switch ((_dst).bytes) {				        \
		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
		}							\
	} while (0)

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _size, _eip)                                  \
({	unsigned long _x;						\
	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	(_eip) += (_size);						\
	(_type)_x;							\
})
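
/*
 * Typical use (as in the decoders below): c->b = insn_fetch(u8, 1, c->eip)
 * pulls one opcode byte through the fetch cache, advances c->eip by one
 * byte and jumps to the enclosing function's "done" label if the fetch
 * cannot be completed.
 */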

static inline unsigned long ad_mask(struct decode_cache *c)
{
	return (1UL << (c->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct decode_cache *c, unsigned long reg)
{
	if (c->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(c);
}

static inline unsigned long
register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
{
	return base + address_mask(c, reg);
}
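
/*
 * Example: with a 16-bit address size (c->ad_bytes == 2), ad_mask() is
 * 0xffff, so register_address(c, ss_base(ctxt), c->regs[VCPU_REGS_RSP])
 * wraps the stack pointer to 16 bits before adding the SS segment base,
 * as real-mode and 16-bit code expect.
 */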

static inline void
register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
{
	if (c->ad_bytes == sizeof(unsigned long))
		*reg += inc;
	else
		*reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
}

static inline void jmp_rel(struct decode_cache *c, int rel)
{
	register_address_increment(c, &c->eip, rel);
}

static void set_seg_override(struct decode_cache *c, int seg)
{
	c->has_seg_override = true;
	c->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return kvm_x86_ops->get_segment_base(ctxt->vcpu, seg);
}

static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
				       struct decode_cache *c)
{
	if (!c->has_seg_override)
		return 0;

	return seg_base(ctxt, c->seg_override);
}

static unsigned long es_base(struct x86_emulate_ctxt *ctxt)
{
	return seg_base(ctxt, VCPU_SREG_ES);
}

static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
{
	return seg_base(ctxt, VCPU_SREG_SS);
}

static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops,
			      unsigned long linear, u8 *dest)
{
	struct fetch_cache *fc = &ctxt->decode.fetch;
	int rc;
	int size;

	if (linear < fc->start || linear >= fc->end) {
		size = min(15UL, PAGE_SIZE - offset_in_page(linear));
		rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		fc->start = linear;
		fc->end = linear + size;
	}
	*dest = fc->data[linear - fc->start];
	return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
{
	int rc;

	/* x86 instructions are limited to 15 bytes. */
	if (eip + size - ctxt->eip > 15)
		return X86EMUL_UNHANDLEABLE;
	eip += ctxt->cs_base;
	while (size--) {
		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
		if (rc != X86EMUL_CONTINUE)
			return rc;
	}
	return X86EMUL_CONTINUE;
}

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
			     int highbyte_regs)
{
	void *p;

	p = &regs[modrm_reg];
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
	return p;
}
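
/*
 * For instance, decode_register(5, c->regs, 1) returns a pointer to CH:
 * with highbyte_regs set, encodings 4-7 select AH/CH/DH/BH, i.e. byte 1
 * of the RAX/RCX/RDX/RBX register slots (5 & 3 == 1 picks RCX).
 */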

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops,
			   void *ptr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
			   ctxt->vcpu, NULL);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
			   ctxt->vcpu, NULL);
	return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
	int rc = 0;

	switch ((condition & 15) >> 1) {
	case 0: /* o */
		rc |= (flags & EFLG_OF);
		break;
	case 1: /* b/c/nae */
		rc |= (flags & EFLG_CF);
		break;
	case 2: /* z/e */
		rc |= (flags & EFLG_ZF);
		break;
	case 3: /* be/na */
		rc |= (flags & (EFLG_CF|EFLG_ZF));
		break;
	case 4: /* s */
		rc |= (flags & EFLG_SF);
		break;
	case 5: /* p/pe */
		rc |= (flags & EFLG_PF);
		break;
	case 7: /* le/ng */
		rc |= (flags & EFLG_ZF);
		/* fall through */
	case 6: /* l/nge */
		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
		break;
	}

	/* Odd condition identifiers (lsb == 1) have inverted sense. */
	return (!!rc ^ (condition & 1));
}
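
/*
 * Example: for the Jcc opcodes 0x70-0x7f the low nibble of the opcode is
 * the condition, so test_cc(0x4, flags) reports ZF (jz/je) while
 * test_cc(0x5, flags) reports !ZF (jnz/jne), the odd encoding inverting
 * the sense as noted above.
 */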

static void decode_register_operand(struct operand *op,
				    struct decode_cache *c,
				    int inhibit_bytereg)
{
	unsigned reg = c->modrm_reg;
	int highbyte_regs = c->rex_prefix == 0;

	if (!(c->d & ModRM))
		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
	op->type = OP_REG;
	if ((c->d & ByteOp) && !inhibit_bytereg) {
		op->ptr = decode_register(reg, c->regs, highbyte_regs);
		op->val = *(u8 *)op->ptr;
		op->bytes = 1;
	} else {
		op->ptr = decode_register(reg, c->regs, 0);
		op->bytes = c->op_bytes;
		switch (op->bytes) {
		case 2:
			op->val = *(u16 *)op->ptr;
			break;
		case 4:
			op->val = *(u32 *)op->ptr;
			break;
		case 8:
			op->val = *(u64 *) op->ptr;
			break;
		}
	}
	op->orig_val = op->val;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	u8 sib;
	int index_reg = 0, base_reg = 0, scale;
	int rc = X86EMUL_CONTINUE;

	if (c->rex_prefix) {
		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
		index_reg = (c->rex_prefix & 2) << 2; /* REX.X */
		c->modrm_rm = base_reg = (c->rex_prefix & 1) << 3; /* REX.B */
	}

	c->modrm = insn_fetch(u8, 1, c->eip);
	c->modrm_mod |= (c->modrm & 0xc0) >> 6;
	c->modrm_reg |= (c->modrm & 0x38) >> 3;
	c->modrm_rm |= (c->modrm & 0x07);
	c->modrm_ea = 0;
	c->use_modrm_ea = 1;

	if (c->modrm_mod == 3) {
		c->modrm_ptr = decode_register(c->modrm_rm,
					       c->regs, c->d & ByteOp);
		c->modrm_val = *(unsigned long *)c->modrm_ptr;
		return rc;
	}

	if (c->ad_bytes == 2) {
		unsigned bx = c->regs[VCPU_REGS_RBX];
		unsigned bp = c->regs[VCPU_REGS_RBP];
		unsigned si = c->regs[VCPU_REGS_RSI];
		unsigned di = c->regs[VCPU_REGS_RDI];

		/* 16-bit ModR/M decode. */
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 6)
				c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(u16, 2, c->eip);
			break;
		}
		switch (c->modrm_rm) {
		case 0:
			c->modrm_ea += bx + si;
			break;
		case 1:
			c->modrm_ea += bx + di;
			break;
		case 2:
			c->modrm_ea += bp + si;
			break;
		case 3:
			c->modrm_ea += bp + di;
			break;
		case 4:
			c->modrm_ea += si;
			break;
		case 5:
			c->modrm_ea += di;
			break;
		case 6:
			if (c->modrm_mod != 0)
				c->modrm_ea += bp;
			break;
		case 7:
			c->modrm_ea += bx;
			break;
		}
		if (c->modrm_rm == 2 || c->modrm_rm == 3 ||
		    (c->modrm_rm == 6 && c->modrm_mod != 0))
			if (!c->has_seg_override)
				set_seg_override(c, VCPU_SREG_SS);
		c->modrm_ea = (u16)c->modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((c->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, 1, c->eip);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && c->modrm_mod == 0)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			else
				c->modrm_ea += c->regs[base_reg];
			if (index_reg != 4)
				c->modrm_ea += c->regs[index_reg] << scale;
		} else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) {
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				c->rip_relative = 1;
		} else
			c->modrm_ea += c->regs[c->modrm_rm];
		switch (c->modrm_mod) {
		case 0:
			if (c->modrm_rm == 5)
				c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		case 1:
			c->modrm_ea += insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->modrm_ea += insn_fetch(s32, 4, c->eip);
			break;
		}
	}
done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;

	switch (c->ad_bytes) {
	case 2:
		c->modrm_ea = insn_fetch(u16, 2, c->eip);
		break;
	case 4:
		c->modrm_ea = insn_fetch(u32, 4, c->eip);
		break;
	case 8:
		c->modrm_ea = insn_fetch(u64, 8, c->eip);
		break;
	}
done:
	return rc;
}

int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, group;

	/* Shadow copy of register state. Committed on successful emulation. */

	memset(c, 0, sizeof(struct decode_cache));
	c->eip = ctxt->eip;
	ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return -1;
	}

	c->op_bytes = def_op_bytes;
	c->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (c->b = insn_fetch(u8, 1, c->eip)) {
		case 0x66:	/* operand-size override */
			/* switch between 2/4 bytes */
			c->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				c->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				c->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(c, (c->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(c, c->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			c->rex_prefix = c->b;
			continue;
		case 0xf0:	/* LOCK */
			c->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
			c->rep_prefix = REPNE_PREFIX;
			break;
		case 0xf3:	/* REP/REPE/REPZ */
			c->rep_prefix = REPE_PREFIX;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		c->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (c->rex_prefix)
		if (c->rex_prefix & 8)
			c->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	c->d = opcode_table[c->b];
	if (c->d == 0) {
		/* Two-byte opcode? */
		if (c->b == 0x0f) {
			c->twobyte = 1;
			c->b = insn_fetch(u8, 1, c->eip);
			c->d = twobyte_table[c->b];
		}
	}

	if (c->d & Group) {
		group = c->d & GroupMask;
		c->modrm = insn_fetch(u8, 1, c->eip);
		--c->eip;

		group = (group << 3) + ((c->modrm >> 3) & 7);
		if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
			c->d = group2_table[group];
		else
			c->d = group_table[group];
	}

	/* Unrecognised? */
	if (c->d == 0) {
		DPRINTF("Cannot emulate %02x\n", c->b);
		return -1;
	}

	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
		c->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (c->d & ModRM)
		rc = decode_modrm(ctxt, ops);
	else if (c->d & MemAbs)
		rc = decode_abs(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!c->has_seg_override)
		set_seg_override(c, VCPU_SREG_DS);

	if (!(!c->twobyte && c->b == 0x8d))
		c->modrm_ea += seg_override_base(ctxt, c);

	if (c->ad_bytes != 8)
		c->modrm_ea = (u32)c->modrm_ea;
	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & SrcMask) {
	case SrcNone:
		break;
	case SrcReg:
		decode_register_operand(&c->src, c, 0);
		break;
	case SrcMem16:
		c->src.bytes = 2;
		goto srcmem_common;
	case SrcMem32:
		c->src.bytes = 4;
		goto srcmem_common;
	case SrcMem:
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		/* Don't fetch the address for invlpg: it could be unmapped. */
		if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
			break;
	srcmem_common:
		/*
		 * For instructions with a ModR/M byte, switch to register
		 * access if Mod = 3.
		 */
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->src.type = OP_REG;
			c->src.val = c->modrm_val;
			c->src.ptr = c->modrm_ptr;
			break;
		}
		c->src.type = OP_MEM;
		break;
	case SrcImm:
	case SrcImmU:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		if (c->src.bytes == 8)
			c->src.bytes = 4;
		/* NB. Immediates are sign-extended as necessary. */
		switch (c->src.bytes) {
		case 1:
			c->src.val = insn_fetch(s8, 1, c->eip);
			break;
		case 2:
			c->src.val = insn_fetch(s16, 2, c->eip);
			break;
		case 4:
			c->src.val = insn_fetch(s32, 4, c->eip);
			break;
		}
		if ((c->d & SrcMask) == SrcImmU) {
			switch (c->src.bytes) {
			case 1:
				c->src.val &= 0xff;
				break;
			case 2:
				c->src.val &= 0xffff;
				break;
			case 4:
				c->src.val &= 0xffffffff;
				break;
			}
		}
		break;
	case SrcImmByte:
	case SrcImmUByte:
		c->src.type = OP_IMM;
		c->src.ptr = (unsigned long *)c->eip;
		c->src.bytes = 1;
		if ((c->d & SrcMask) == SrcImmByte)
			c->src.val = insn_fetch(s8, 1, c->eip);
		else
			c->src.val = insn_fetch(u8, 1, c->eip);
		break;
	case SrcOne:
		c->src.bytes = 1;
		c->src.val = 1;
		break;
	}

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	switch (c->d & Src2Mask) {
	case Src2None:
		break;
	case Src2CL:
		c->src2.bytes = 1;
		c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff;	/* CL is the low byte of RCX */
		break;
	case Src2ImmByte:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 1;
		c->src2.val = insn_fetch(u8, 1, c->eip);
		break;
	case Src2Imm16:
		c->src2.type = OP_IMM;
		c->src2.ptr = (unsigned long *)c->eip;
		c->src2.bytes = 2;
		c->src2.val = insn_fetch(u16, 2, c->eip);
		break;
	case Src2One:
		c->src2.bytes = 1;
		c->src2.val = 1;
		break;
	case Src2Mem16:
		c->src2.bytes = 2;
		c->src2.type = OP_MEM;
		break;
	}

	/* Decode and fetch the destination operand: register or memory. */
	switch (c->d & DstMask) {
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
		return 0;
	case DstReg:
		decode_register_operand(&c->dst, c,
			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
		break;
	case DstMem:
		if ((c->d & ModRM) && c->modrm_mod == 3) {
			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
			c->dst.type = OP_REG;
			c->dst.val = c->dst.orig_val = c->modrm_val;
			c->dst.ptr = c->modrm_ptr;
			break;
		}
		c->dst.type = OP_MEM;
		break;
	case DstAcc:
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
		switch (c->dst.bytes) {
			case 1:
				c->dst.val = *(u8 *)c->dst.ptr;
				break;
			case 2:
				c->dst.val = *(u16 *)c->dst.ptr;
				break;
			case 4:
				c->dst.val = *(u32 *)c->dst.ptr;
				break;
			case 8:
				c->dst.val = *(u64 *)c->dst.ptr;
				break;
		}
		c->dst.orig_val = c->dst.val;
		break;
	}

	if (c->rip_relative)
		c->modrm_ea += c->eip;

done:
	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}

static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	c->dst.type  = OP_MEM;
	c->dst.bytes = c->op_bytes;
	c->dst.val = c->src.val;
	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
	c->dst.ptr = (void *) register_address(c, ss_base(ctxt),
					       c->regs[VCPU_REGS_RSP]);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;

	rc = ops->read_emulated(register_address(c, ss_base(ctxt),
						 c->regs[VCPU_REGS_RSP]),
				dest, len, ctxt->vcpu);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
		       struct x86_emulate_ops *ops,
		       void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ops->cpl(ctxt->vcpu);

	rc = emulate_pop(ctxt, ops, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment segment;

	kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);

	c->src.val = segment.selector;
	emulate_push(ctxt);
}

static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
			     struct x86_emulate_ops *ops, int seg)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg);
	return rc;
}

static void emulate_pusha(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

		emulate_push(ctxt);
		++reg;
	}
}

static int emulate_popa(struct x86_emulate_ctxt *ctxt,
			struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(c, &c->regs[VCPU_REGS_RSP],
							c->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
				struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}

static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	switch (c->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB("rol", c->src, c->dst, ctxt->eflags);
		break;
	case 1:	/* ror */
		emulate_2op_SrcB("ror", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB("rcl", c->src, c->dst, ctxt->eflags);
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB("rcr", c->src, c->dst, ctxt->eflags);
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB("sal", c->src, c->dst, ctxt->eflags);
		break;
	case 5:	/* shr */
		emulate_2op_SrcB("shr", c->src, c->dst, ctxt->eflags);
		break;
	case 7:	/* sar */
		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
		break;
	}
}

static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0 ... 1:	/* test */
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 2:	/* not */
		c->dst.val = ~c->dst.val;
		break;
	case 3:	/* neg */
		emulate_1op("neg", c->dst, ctxt->eflags);
		break;
	default:
		return 0;
	}
	return 1;
}

static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;

	switch (c->modrm_reg) {
	case 0:	/* inc */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 1:	/* dec */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = c->eip;
		c->eip = c->src.val;
		c->src.val = old_eip;
		emulate_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		c->eip = c->src.val;
		break;
	case 6:	/* push */
		emulate_push(ctxt);
		break;
	}
	return X86EMUL_CONTINUE;
}

static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
			       struct x86_emulate_ops *ops,
			       unsigned long memop)
{
	struct decode_cache *c = &ctxt->decode;
	u64 old, new;
	int rc;

	rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {

		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;

	} else {
		new = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
		       (u32) c->regs[VCPU_REGS_RBX];

		rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
			   struct x86_emulate_ops *ops)
{
	struct decode_cache *c = &ctxt->decode;
	int rc;
	unsigned long cs;

	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (c->op_bytes == 4)
		c->eip = (u32)c->eip;
	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
	return rc;
}

static inline int writeback(struct x86_emulate_ctxt *ctxt,
			    struct x86_emulate_ops *ops)
{
	int rc;
	struct decode_cache *c = &ctxt->decode;

	switch (c->dst.type) {
	case OP_REG:
		/* The 4-byte case *is* correct:
		 * in 64-bit mode we zero-extend.
		 */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *)c->dst.ptr = (u8)c->dst.val;
			break;
		case 2:
			*(u16 *)c->dst.ptr = (u16)c->dst.val;
			break;
		case 4:
			*c->dst.ptr = (u32)c->dst.val;
			break;	/* 64b: zero-ext */
		case 8:
			*c->dst.ptr = c->dst.val;
			break;
		}
		break;
	case OP_MEM:
		if (c->lock_prefix)
			rc = ops->cmpxchg_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.orig_val,
					&c->dst.val,
					c->dst.bytes,
					ctxt->vcpu);
		else
			rc = ops->write_emulated(
					(unsigned long)c->dst.ptr,
					&c->dst.val,
					c->dst.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
	/*
	 * an sti; sti; sequence only disables interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss.
	 */
	if (!(int_shadow & mask))
		ctxt->interruptibility = mask;
}

static inline void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
	struct kvm_segment *cs, struct kvm_segment *ss)
{
	memset(cs, 0, sizeof(struct kvm_segment));
	kvm_x86_ops->get_segment(ctxt->vcpu, cs, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct kvm_segment));

	cs->l = 0;		/* will be adjusted later */
	cs->base = 0;		/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	cs->limit = 0xffffffff;	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->present = 1;
	cs->db = 1;

	ss->unusable = 0;
	ss->base = 0;		/* flat segment */
	ss->limit = 0xffffffff;	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->db = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->present = 1;
}

static int
emulate_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs.selector = (u16)(msr_data & 0xfffc);
	ss.selector = (u16)(msr_data + 8);

	if (is_long_mode(ctxt->vcpu)) {
		cs.db = 0;
		cs.l = 1;
	}
	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	c->regs[VCPU_REGS_RCX] = c->eip;
	if (is_long_mode(ctxt->vcpu)) {
#ifdef CONFIG_X86_64
		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		kvm_x86_ops->get_msr(ctxt->vcpu,
			ctxt->mode == X86EMUL_MODE_PROT64 ?
			MSR_LSTAR : MSR_CSTAR, &msr_data);
		c->eip = msr_data;

		kvm_x86_ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
		c->eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

static int
emulate_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;

	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* XXX sysenter/sysexit have not been tested in 64bit mode.
	* Therefore, we inject an #UD.
	*/
	if (ctxt->mode == X86EMUL_MODE_PROT64) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs.selector = (u16)msr_data;
	cs.selector &= ~SELECTOR_RPL_MASK;
	ss.selector = cs.selector + 8;
	ss.selector &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64
		|| is_long_mode(ctxt->vcpu)) {
		cs.db = 0;
		cs.l = 1;
	}

	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
	c->eip = msr_data;

	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
	c->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

static int
emulate_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;
	struct kvm_segment cs, ss;
	u64 msr_data;
	int usermode;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86) {
		kvm_inject_gp(ctxt->vcpu, 0);
		return X86EMUL_PROPAGATE_FAULT;
	}

	setup_syscalls_segments(ctxt, &cs, &ss);

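	/* A REX.W prefix requests a return to 64-bit user mode. */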
	if ((c->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	cs.dpl = 3;
	ss.dpl = 3;
	kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
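	/*
	 * SYSEXIT derives the user CS/SS from SYSENTER_CS: +16/+24 for a
	 * 32-bit return, +32/+40 for a 64-bit return.
	 */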
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs.selector = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss.selector = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs.selector = (u16)(msr_data + 32);
		if (msr_data == 0x0) {
			kvm_inject_gp(ctxt->vcpu, 0);
			return X86EMUL_PROPAGATE_FAULT;
		}
		ss.selector = cs.selector + 8;
		cs.db = 0;
		cs.l = 1;
		break;
	}
	cs.selector |= SELECTOR_RPL_MASK;
	ss.selector |= SELECTOR_RPL_MASK;

	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);

	c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
	c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops)
{
	int iopl;
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
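	/* Unrestricted I/O access requires CPL <= IOPL. */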
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ops->cpl(ctxt->vcpu) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    struct x86_emulate_ops *ops,
					    u16 port, u16 len)
{
	struct kvm_segment tr_seg;
	int r;
	u16 io_bitmap_ptr;
	u8 perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

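	/*
	 * Walk the TSS I/O permission bitmap: the 16-bit bitmap offset is
	 * stored at byte 102 of the TSS, and a set bit denies access to the
	 * corresponding port.
	 */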
	kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
	if (tr_seg.unusable)
		return false;
	if (tr_seg.limit < 103)
		return false;
	r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
			  NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > tr_seg.limit)
		return false;
	r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
			  ctxt->vcpu, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 struct x86_emulate_ops *ops,
				 u16 port, u16 len)
{
	if (emulator_bad_iopl(ctxt, ops))
		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
			return false;
	return true;
}

int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
	unsigned long memop = 0;
	u64 msr_data;
	unsigned long saved_eip = 0;
	struct decode_cache *c = &ctxt->decode;
	unsigned int port;
	int io_dir_in;
	int rc = X86EMUL_CONTINUE;

	ctxt->interruptibility = 0;

	/* Shadow copy of register state. Committed on successful emulation.
	 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
	 * modify them.
	 */

	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
	saved_eip = c->eip;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		goto done;
	}

	/* LOCK prefix is allowed only with some instructions */
	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
		goto done;
	}

	/* Privileged instruction can be executed only in CPL=0 */
	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
		kvm_inject_gp(ctxt->vcpu, 0);
		goto done;
	}

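	/* memop caches the effective address of a decoded memory operand. */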
	if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
		memop = c->modrm_ea;

	if (c->rep_prefix && (c->d & String)) {
		/* All REP prefixes have the same first termination condition */
		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
			kvm_rip_write(ctxt->vcpu, c->eip);
			goto done;
		}
		/* The second termination condition only applies to REPE
		 * and REPNE.  If the repeat string operation prefix is
		 * REPE/REPZ or REPNE/REPNZ, check the corresponding
		 * termination condition:
		 * 	- if REPE/REPZ and ZF = 0 then done
		 * 	- if REPNE/REPNZ and ZF = 1 then done
		 */
		if ((c->b == 0xa6) || (c->b == 0xa7) ||
				(c->b == 0xae) || (c->b == 0xaf)) {
			if ((c->rep_prefix == REPE_PREFIX) &&
				((ctxt->eflags & EFLG_ZF) == 0)) {
					kvm_rip_write(ctxt->vcpu, c->eip);
					goto done;
			}
			if ((c->rep_prefix == REPNE_PREFIX) &&
				((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
				kvm_rip_write(ctxt->vcpu, c->eip);
				goto done;
			}
		}
		register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
		c->eip = ctxt->eip;
	}

	if (c->src.type == OP_MEM) {
		c->src.ptr = (unsigned long *)memop;
		c->src.val = 0;
		rc = ops->read_emulated((unsigned long)c->src.ptr,
					&c->src.val,
					c->src.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->src.orig_val = c->src.val;
	}

	if (c->src2.type == OP_MEM) {
		c->src2.ptr = (unsigned long *)(memop + c->src.bytes);
		c->src2.val = 0;
		rc = ops->read_emulated((unsigned long)c->src2.ptr,
					&c->src2.val,
					c->src2.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((c->d & DstMask) == ImplicitOps)
		goto special_insn;


	if (c->dst.type == OP_MEM) {
		c->dst.ptr = (unsigned long *)memop;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.val = 0;
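		/*
		 * Bit instructions address memory by bit offset: point at
		 * the word that actually contains the addressed bit.
		 */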
		if (c->d & BitOp) {
			unsigned long mask = ~(c->dst.bytes * 8 - 1);

			c->dst.ptr = (void *)c->dst.ptr +
						   (c->src.val & mask) / 8;
		}
		if (!(c->d & Mov)) {
			/* optimisation - avoid slow emulated read */
			rc = ops->read_emulated((unsigned long)c->dst.ptr,
						&c->dst.val,
						c->dst.bytes,
						ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}
	}
	c->dst.orig_val = c->dst.val;

special_insn:

	if (c->twobyte)
		goto twobyte_insn;

	switch (c->b) {
	case 0x00 ... 0x05:
	      add:		/* add */
		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
		break;
	case 0x06:		/* push es */
		emulate_push_sreg(ctxt, VCPU_SREG_ES);
		break;
	case 0x07:		/* pop es */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x08 ... 0x0d:
	      or:		/* or */
		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
		break;
	case 0x0e:		/* push cs */
		emulate_push_sreg(ctxt, VCPU_SREG_CS);
		break;
	case 0x10 ... 0x15:
	      adc:		/* adc */
		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
		break;
	case 0x16:		/* push ss */
		emulate_push_sreg(ctxt, VCPU_SREG_SS);
		break;
	case 0x17:		/* pop ss */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x18 ... 0x1d:
	      sbb:		/* sbb */
		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
		break;
	case 0x1e:		/* push ds */
		emulate_push_sreg(ctxt, VCPU_SREG_DS);
		break;
	case 0x1f:		/* pop ds */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x20 ... 0x25:
	      and:		/* and */
		emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
		break;
	case 0x28 ... 0x2d:
	      sub:		/* sub */
		emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
		break;
	case 0x30 ... 0x35:
	      xor:		/* xor */
		emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
		break;
	case 0x38 ... 0x3d:
	      cmp:		/* cmp */
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		break;
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op("inc", c->dst, ctxt->eflags);
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op("dec", c->dst, ctxt->eflags);
		break;
	case 0x50 ... 0x57:  /* push reg */
		emulate_push(ctxt);
		break;
	case 0x58 ... 0x5f: /* pop reg */
	pop_instruction:
		rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x60:	/* pusha */
		emulate_pusha(ctxt);
		break;
	case 0x61:	/* popa */
		rc = emulate_popa(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		c->dst.val = (s32) c->src.val;
		break;
	case 0x68: /* push imm */
	case 0x6a: /* push imm8 */
		emulate_push(ctxt);
		break;
	case 0x6c:		/* insb */
	case 0x6d:		/* insw/insd */
		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
					  (c->d & ByteOp) ? 1 : c->op_bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		if (kvm_emulate_pio_string(ctxt->vcpu,
				1,
				(c->d & ByteOp) ? 1 : c->op_bytes,
				c->rep_prefix ?
				address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
				(ctxt->eflags & EFLG_DF),
				register_address(c, es_base(ctxt),
						 c->regs[VCPU_REGS_RDI]),
				c->rep_prefix,
				c->regs[VCPU_REGS_RDX]) == 0) {
			c->eip = saved_eip;
			return -1;
		}
		return 0;
	case 0x6e:		/* outsb */
	case 0x6f:		/* outsw/outsd */
		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
					  (c->d & ByteOp) ? 1 : c->op_bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		if (kvm_emulate_pio_string(ctxt->vcpu,
				0,
				(c->d & ByteOp) ? 1 : c->op_bytes,
				c->rep_prefix ?
				address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
				(ctxt->eflags & EFLG_DF),
					 register_address(c,
					  seg_override_base(ctxt, c),
						 c->regs[VCPU_REGS_RSI]),
				c->rep_prefix,
				c->regs[VCPU_REGS_RDX]) == 0) {
			c->eip = saved_eip;
			return -1;
		}
		return 0;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		break;
	case 0x80 ... 0x83:	/* Grp1 */
		switch (c->modrm_reg) {
		case 0:
			goto add;
		case 1:
			goto or;
		case 2:
			goto adc;
		case 3:
			goto sbb;
		case 4:
			goto and;
		case 5:
			goto sub;
		case 6:
			goto xor;
		case 7:
			goto cmp;
		}
		break;
	case 0x84 ... 0x85:
		emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
		break;
	case 0x86 ... 0x87:	/* xchg */
	xchg:
		/* Write back the register source. */
		switch (c->dst.bytes) {
		case 1:
			*(u8 *) c->src.ptr = (u8) c->dst.val;
			break;
		case 2:
			*(u16 *) c->src.ptr = (u16) c->dst.val;
			break;
		case 4:
			*c->src.ptr = (u32) c->dst.val;
			break;	/* 64b reg: zero-extend */
		case 8:
			*c->src.ptr = c->dst.val;
			break;
		}
		/*
		 * Write back the memory destination with implicit LOCK
		 * prefix.
		 */
		c->dst.val = c->src.val;
		c->lock_prefix = 1;
		break;
	case 0x88 ... 0x8b:	/* mov */
		goto mov;
	case 0x8c: { /* mov r/m, sreg */
		struct kvm_segment segreg;

		if (c->modrm_reg <= VCPU_SREG_GS)
			kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg);
		else {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		c->dst.val = segreg.selector;
		break;
	}
	case 0x8d: /* lea r16/r32, m */
		c->dst.val = c->modrm_ea;
		break;
	case 0x8e: { /* mov seg, r/m16 */
		uint16_t sel;

		sel = c->src.val;

		if (c->modrm_reg == VCPU_SREG_CS ||
		    c->modrm_reg > VCPU_SREG_GS) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}

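		/* Loading SS inhibits interrupts until the next instruction. */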
		if (c->modrm_reg == VCPU_SREG_SS)
			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_MOV_SS);

		rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);

		c->dst.type = OP_NONE;  /* Disable writeback. */
		break;
	}
	case 0x8f:		/* pop (sole member of Grp1a) */
		rc = emulate_grp1a(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0x90: /* nop / xchg r8,rax */
		if (!(c->rex_prefix & 1)) { /* nop */
			c->dst.type = OP_NONE;
			break;
		}
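		/* with a REX.B prefix 0x90 is xchg r8,rax: fall through */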
	case 0x91 ... 0x97: /* xchg reg,rax */
		c->src.type = c->dst.type = OP_REG;
		c->src.bytes = c->dst.bytes = c->op_bytes;
		c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX];
		c->src.val = *(c->src.ptr);
		goto xchg;
	case 0x9c: /* pushf */
		c->src.val =  (unsigned long) ctxt->eflags;
		emulate_push(ctxt);
		break;
	case 0x9d: /* popf */
		c->dst.type = OP_REG;
		c->dst.ptr = (unsigned long *) &ctxt->eflags;
		c->dst.bytes = c->op_bytes;
		rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa0 ... 0xa1:	/* mov */
		c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		c->dst.val = c->src.val;
		break;
	case 0xa2 ... 0xa3:	/* mov */
		c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX];
		break;
	case 0xa4 ... 0xa5:	/* movs */
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = (unsigned long *)register_address(c,
						   es_base(ctxt),
						   c->regs[VCPU_REGS_RDI]);
		rc = ops->read_emulated(register_address(c,
						seg_override_base(ctxt, c),
						c->regs[VCPU_REGS_RSI]),
					&c->dst.val,
					c->dst.bytes, ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		register_address_increment(c, &c->regs[VCPU_REGS_RSI],
				       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
							   : c->dst.bytes);
		register_address_increment(c, &c->regs[VCPU_REGS_RDI],
				       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
							   : c->dst.bytes);
		break;
	case 0xa6 ... 0xa7:	/* cmps */
		c->src.type = OP_NONE; /* Disable writeback. */
		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->src.ptr = (unsigned long *)register_address(c,
				       seg_override_base(ctxt, c),
						   c->regs[VCPU_REGS_RSI]);
		rc = ops->read_emulated((unsigned long)c->src.ptr,
					&c->src.val,
					c->src.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;

		c->dst.type = OP_NONE; /* Disable writeback. */
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = (unsigned long *)register_address(c,
						   es_base(ctxt),
						   c->regs[VCPU_REGS_RDI]);
		rc = ops->read_emulated((unsigned long)c->dst.ptr,
					&c->dst.val,
					c->dst.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;

		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);

		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);

		register_address_increment(c, &c->regs[VCPU_REGS_RSI],
				       (ctxt->eflags & EFLG_DF) ? -c->src.bytes
								  : c->src.bytes);
		register_address_increment(c, &c->regs[VCPU_REGS_RDI],
				       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
								  : c->dst.bytes);

		break;
	case 0xaa ... 0xab:	/* stos */
		c->dst.type = OP_MEM;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = (unsigned long *)register_address(c,
						   es_base(ctxt),
						   c->regs[VCPU_REGS_RDI]);
		c->dst.val = c->regs[VCPU_REGS_RAX];
		register_address_increment(c, &c->regs[VCPU_REGS_RDI],
				       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
							   : c->dst.bytes);
		break;
	case 0xac ... 0xad:	/* lods */
		c->dst.type = OP_REG;
		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
		c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		rc = ops->read_emulated(register_address(c,
						seg_override_base(ctxt, c),
						c->regs[VCPU_REGS_RSI]),
					&c->dst.val,
					c->dst.bytes,
					ctxt->vcpu);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		register_address_increment(c, &c->regs[VCPU_REGS_RSI],
				       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
							   : c->dst.bytes);
		break;
	case 0xae ... 0xaf:	/* scas */
		DPRINTF("Urk! I don't handle SCAS.\n");
		goto cannot_emulate;
	case 0xb0 ... 0xbf: /* mov r, imm */
		goto mov;
	case 0xc0 ... 0xc1:
		emulate_grp2(ctxt);
		break;
	case 0xc3: /* ret */
		c->dst.type = OP_REG;
		c->dst.ptr = &c->eip;
		c->dst.bytes = c->op_bytes;
		goto pop_instruction;
	case 0xc6 ... 0xc7:	/* mov (sole member of Grp11) */
	mov:
		c->dst.val = c->src.val;
		break;
	case 0xcb:		/* ret far */
		rc = emulate_ret_far(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		c->src.val = 1;
		emulate_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		c->src.val = c->regs[VCPU_REGS_RCX];
		emulate_grp2(ctxt);
		break;
	case 0xe4: 	/* inb */
	case 0xe5: 	/* in */
		port = c->src.val;
		io_dir_in = 1;
		goto do_io;
	case 0xe6: /* outb */
	case 0xe7: /* out */
		port = c->src.val;
		io_dir_in = 0;
		goto do_io;
	case 0xe8: /* call (near) */ {
		long int rel = c->src.val;
		c->src.val = (unsigned long) c->eip;
		jmp_rel(c, rel);
		emulate_push(ctxt);
		break;
	}
	case 0xe9: /* jmp rel */
		goto jmp;
	case 0xea: /* jmp far */
	jump_far:
		if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
						VCPU_SREG_CS))
			goto done;

		c->eip = c->src.val;
		break;
	case 0xeb:
	      jmp:		/* jmp rel short */
		jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		port = c->regs[VCPU_REGS_RDX];
		io_dir_in = 1;
		goto do_io;
	case 0xee: /* out al,dx */
	case 0xef: /* out (e/r)ax,dx */
		port = c->regs[VCPU_REGS_RDX];
		io_dir_in = 0;
	do_io:
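		/* Port access is allowed if CPL <= IOPL or the TSS I/O bitmap permits it. */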
		if (!emulator_io_permited(ctxt, ops, port,
					  (c->d & ByteOp) ? 1 : c->op_bytes)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		if (kvm_emulate_pio(ctxt->vcpu, io_dir_in,
				   (c->d & ByteOp) ? 1 : c->op_bytes,
				   port) != 0) {
			c->eip = saved_eip;
			goto cannot_emulate;
		}
		break;
	case 0xf4:              /* hlt */
		ctxt->vcpu->arch.halt_request = 1;
		break;
	case 0xf5:	/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xf6 ... 0xf7:	/* Grp3 */
		if (!emulate_grp3(ctxt, ops))
			goto cannot_emulate;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfa: /* cli */
		if (emulator_bad_iopl(ctxt, ops))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
			ctxt->eflags &= ~X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfb: /* sti */
		if (emulator_bad_iopl(ctxt, ops))
			kvm_inject_gp(ctxt->vcpu, 0);
		else {
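			/* STI inhibits interrupts until after the next instruction. */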
			toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI);
			ctxt->eflags |= X86_EFLAGS_IF;
			c->dst.type = OP_NONE;	/* Disable writeback. */
		}
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		c->dst.type = OP_NONE;	/* Disable writeback. */
		break;
	case 0xfe: /* Grp4 */
	grp45:
		rc = emulate_grp45(ctxt, ops);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xff: /* Grp5 */
		if (c->modrm_reg == 5)
			goto jump_far;
		goto grp45;
	}

writeback:
	rc = writeback(ctxt, ops);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Commit shadow register state. */
	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
	kvm_rip_write(ctxt->vcpu, c->eip);

done:
	if (rc == X86EMUL_UNHANDLEABLE) {
		c->eip = saved_eip;
		return -1;
	}
	return 0;

twobyte_insn:
	switch (c->b) {
	case 0x01: /* lgdt, lidt, lmsw */
		switch (c->modrm_reg) {
			u16 size;
			unsigned long address;

		case 0: /* vmcall */
			if (c->modrm_mod != 3 || c->modrm_rm != 1)
				goto cannot_emulate;

			rc = kvm_fix_hypercall(ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				goto done;

			/* Let the processor re-execute the fixed hypercall */
			c->eip = ctxt->eip;
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 2: /* lgdt */
			rc = read_descriptor(ctxt, ops, c->src.ptr,
					     &size, &address, c->op_bytes);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			realmode_lgdt(ctxt->vcpu, size, address);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 3: /* lidt/vmmcall */
			if (c->modrm_mod == 3) {
				switch (c->modrm_rm) {
				case 1:
					rc = kvm_fix_hypercall(ctxt->vcpu);
					if (rc != X86EMUL_CONTINUE)
						goto done;
					break;
				default:
					goto cannot_emulate;
				}
			} else {
				rc = read_descriptor(ctxt, ops, c->src.ptr,
						     &size, &address,
						     c->op_bytes);
				if (rc != X86EMUL_CONTINUE)
					goto done;
				realmode_lidt(ctxt->vcpu, size, address);
			}
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		case 4: /* smsw */
			c->dst.bytes = 2;
			c->dst.val = ops->get_cr(0, ctxt->vcpu);
			break;
		case 6: /* lmsw */
			ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) |
				    (c->src.val & 0x0f), ctxt->vcpu);
			c->dst.type = OP_NONE;
			break;
		case 5: /* not defined */
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		case 7: /* invlpg */
			emulate_invlpg(ctxt->vcpu, memop);
			/* Disable writeback. */
			c->dst.type = OP_NONE;
			break;
		default:
			goto cannot_emulate;
		}
		break;
	case 0x05: 		/* syscall */
		rc = emulate_syscall(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x06:
		emulate_clts(ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x08:		/* invd */
	case 0x09:		/* wbinvd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		c->dst.type = OP_NONE;
		break;
	case 0x20: /* mov cr, reg */
		switch (c->modrm_reg) {
		case 1:
		case 5 ... 7:
		case 9 ... 15:
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x21: /* mov from dr to reg */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x22: /* mov reg, cr */
		ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu);
		c->dst.type = OP_NONE;
		break;
	case 0x23: /* mov from reg to dr */
		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
			goto done;
		}
		emulator_set_dr(ctxt, c->modrm_reg, c->regs[c->modrm_rm]);
		c->dst.type = OP_NONE;	/* no writeback */
		break;
	case 0x30:
		/* wrmsr */
		msr_data = (u32)c->regs[VCPU_REGS_RAX]
			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
		if (kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x32:
		/* rdmsr */
		if (kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
			kvm_inject_gp(ctxt->vcpu, 0);
			goto done;
		} else {
			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
			c->regs[VCPU_REGS_RDX] = msr_data >> 32;
		}
		rc = X86EMUL_CONTINUE;
		c->dst.type = OP_NONE;
		break;
	case 0x34:		/* sysenter */
		rc = emulate_sysenter(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x35:		/* sysexit */
		rc = emulate_sysexit(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		else
			goto writeback;
		break;
	case 0x40 ... 0x4f:	/* cmov */
		c->dst.val = c->dst.orig_val = c->src.val;
		if (!test_cc(c->b, ctxt->eflags))
			c->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc*/
		if (test_cc(c->b, ctxt->eflags))
			jmp_rel(c, c->src.val);
		c->dst.type = OP_NONE;
		break;
	case 0xa0:	  /* push fs */
		emulate_push_sreg(ctxt, VCPU_SREG_FS);
		break;
	case 0xa1:	 /* pop fs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xa3:
	      bt:		/* bt */
		c->dst.type = OP_NONE;
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xa8:	/* push gs */
		emulate_push_sreg(ctxt, VCPU_SREG_GS);
		break;
	case 0xa9:	/* pop gs */
		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		break;
	case 0xab:
	      bts:		/* bts */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags);
		break;
	case 0xae:              /* clflush */
		break;
	case 0xb0 ... 0xb1:	/* cmpxchg */
		/*
		 * Save real source value, then compare EAX against
		 * destination.
		 */
		c->src.orig_val = c->src.val;
		c->src.val = c->regs[VCPU_REGS_RAX];
		emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
		if (ctxt->eflags & EFLG_ZF) {
			/* Success: write back to memory. */
			c->dst.val = c->src.orig_val;
		} else {
			/* Failure: write the value we saw to EAX. */
			c->dst.type = OP_REG;
			c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
		}
		break;
	case 0xb3:
	      btr:		/* btr */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (u8) c->src.val
						       : (u16) c->src.val;
		break;
	case 0xba:		/* Grp8 */
		switch (c->modrm_reg & 3) {
		case 0:
			goto bt;
		case 1:
			goto bts;
		case 2:
			goto btr;
		case 3:
			goto btc;
		}
		break;
	case 0xbb:
	      btc:		/* btc */
		/* only subword offset */
		c->src.val &= (c->dst.bytes << 3) - 1;
		emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
		break;
	case 0xbe ... 0xbf:	/* movsx */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->d & ByteOp) ? (s8) c->src.val :
							(s16) c->src.val;
		break;
	case 0xc3:		/* movnti */
		c->dst.bytes = c->op_bytes;
		c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val :
							(u64) c->src.val;
		break;
	case 0xc7:		/* Grp9 (cmpxchg8b) */
		rc = emulate_grp9(ctxt, ops, memop);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		c->dst.type = OP_NONE;
		break;
	}
	goto writeback;

cannot_emulate:
	DPRINTF("Cannot emulate %02x\n", c->b);
	c->eip = saved_eip;
	return -1;
}