/******************************************************************************
 * x86_emulate.h
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#ifndef _ASM_X86_KVM_X86_EMULATE_H
#define _ASM_X86_KVM_X86_EMULATE_H

#include <asm/desc_defs.h>

struct x86_emulate_ctxt;
enum x86_intercept;
enum x86_intercept_stage;

struct x86_exception {
	u8 vector;
	bool error_code_valid;
	u16 error_code;
	bool nested_page_fault;
	u64 address; /* cr2 or nested page fault gpa */
};

/*
 * This struct is used to carry enough information from the instruction
 * decoder to main KVM so that a decision can be made whether the
 * instruction needs to be intercepted or not.
 */
struct x86_instruction_info {
	u8  intercept;          /* which intercept                      */
	u8  rep_prefix;         /* rep prefix?                          */
	u8  modrm_mod;		/* mod part of modrm			*/
	u8  modrm_reg;          /* index of register used               */
	u8  modrm_rm;		/* rm part of modrm			*/
	u64 src_val;            /* value of source operand              */
	u8  src_bytes;          /* size of source operand               */
	u8  dst_bytes;          /* size of destination operand          */
	u8  ad_bytes;           /* size of src/dst address              */
	u64 next_rip;           /* rip following the instruction        */
};
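
/*
 * Illustrative sketch (not KVM's actual code): the ->intercept() hook in
 * x86_emulate_ops below receives one of these structures and can key off
 * info->intercept plus the ModRM fields to decide whether a nested
 * hypervisor has claimed the instruction, e.g.:
 *
 *	static int example_intercept(struct x86_emulate_ctxt *ctxt,
 *				     struct x86_instruction_info *info,
 *				     enum x86_intercept_stage stage)
 *	{
 *		if (stage == X86_ICPT_PRE_EXCEPT &&
 *		    info->intercept == x86_intercept_rdtsc)
 *			return X86EMUL_INTERCEPTED;
 *		return X86EMUL_CONTINUE;
 *	}
 */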

/*
 * x86_emulate_ops:
 *
 * These operations represent the instruction emulator's interface to memory.
 * There are two categories of operation: those that act on ordinary memory
 * regions (*_std), and those that act on memory regions known to require
 * special treatment or emulation (*_emulated).
 *
 * The emulator assumes that an instruction accesses only one 'emulated memory'
 * location, that this location is the given linear faulting address (cr2), and
 * that this is one of the instruction's data operands. Instruction fetches and
 * stack operations are assumed never to access emulated memory. The emulator
 * automatically deduces which operand of a string-move operation is accessing
 * emulated memory, and assumes that the other operand accesses normal memory.
 *
 * NOTES:
 *  1. The emulator isn't very smart about emulated vs. standard memory.
 *     'Emulated memory' access addresses should be checked for sanity.
 *     'Normal memory' accesses may fault, and the caller must arrange to
 *     detect and handle reentrancy into the emulator via recursive faults.
 *     Accesses may be unaligned and may cross page boundaries.
 *  2. If the access fails (cannot emulate, or a standard access faults) then
 *     it is up to the memop to propagate the fault to the guest VM via
 *     some out-of-band mechanism, unknown to the emulator. The memop signals
 *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
 *     then immediately bail.
 *  3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
 *     cmpxchg_emulated needs to support 8-byte accesses.
 *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
 */
/* Access completed successfully: continue emulation as normal. */
#define X86EMUL_CONTINUE        0
/* Access is unhandleable: bail from emulation and return error to caller. */
#define X86EMUL_UNHANDLEABLE    1
/* Terminate emulation but return success to the caller. */
#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
#define X86EMUL_RETRY_INSTR     3 /* retry the instruction for some reason */
#define X86EMUL_CMPXCHG_FAILED  4 /* cmpxchg did not see expected value */
#define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
#define X86EMUL_INTERCEPTED     6 /* Intercepted by nested VMCB/VMCS */
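
/*
 * Illustrative sketch (not taken from KVM): a memop such as ->read_emulated()
 * reports its outcome with the codes above, and describes a failed access by
 * filling in the x86_exception before returning X86EMUL_PROPAGATE_FAULT.
 * example_backend_read() is a made-up helper, 14 is the #PF vector
 * (PF_VECTOR in kvm_host.h), and a real implementation would compute a
 * proper page-fault error code:
 *
 *	static int example_read_emulated(struct x86_emulate_ctxt *ctxt,
 *					 unsigned long addr, void *val,
 *					 unsigned int bytes,
 *					 struct x86_exception *fault)
 *	{
 *		if (!example_backend_read(addr, val, bytes)) {
 *			fault->vector = 14;
 *			fault->error_code_valid = true;
 *			fault->error_code = 0;
 *			fault->nested_page_fault = false;
 *			fault->address = addr;
 *			return X86EMUL_PROPAGATE_FAULT;
 *		}
 *		return X86EMUL_CONTINUE;
 *	}
 */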

struct x86_emulate_ops {
	/*
	 * read_gpr: read a general purpose register (rax - r15)
	 *
	 * @reg: gpr number.
	 */
	ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
	/*
	 * write_gpr: write a general purpose register (rax - r15)
	 *
	 * @reg: gpr number.
	 * @val: value to write.
	 */
	void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
	/*
	 * read_std: Read bytes of standard (non-emulated/special) memory.
	 *           Used for descriptor reading.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*read_std)(struct x86_emulate_ctxt *ctxt,
			unsigned long addr, void *val,
			unsigned int bytes,
			struct x86_exception *fault);

	/*
	 * write_std: Write bytes of standard (non-emulated/special) memory.
	 *            Used for descriptor writing.
	 *  @addr:  [IN ] Linear address to which to write.
	 *  @val:   [IN ] Value to write to memory.
	 *  @bytes: [IN ] Number of bytes to write to memory.
	 */
	int (*write_std)(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *val, unsigned int bytes,
			 struct x86_exception *fault);
	/*
	 * fetch: Read bytes of standard (non-emulated/special) memory.
	 *        Used for instruction fetch.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*fetch)(struct x86_emulate_ctxt *ctxt,
		     unsigned long addr, void *val, unsigned int bytes,
		     struct x86_exception *fault);

	/*
	 * read_emulated: Read bytes from emulated/special memory area.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
			     unsigned long addr, void *val, unsigned int bytes,
			     struct x86_exception *fault);

	/*
	 * write_emulated: Write bytes to emulated/special memory area.
	 *  @addr:  [IN ] Linear address to which to write.
	 *  @val:   [IN ] Value to write to memory (low-order bytes used as
	 *                required).
	 *  @bytes: [IN ] Number of bytes to write to memory.
	 */
	int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
			      unsigned long addr, const void *val,
			      unsigned int bytes,
			      struct x86_exception *fault);

	/*
	 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
	 *                   emulated/special memory area.
	 *  @addr:  [IN ] Linear address to access.
	 *  @old:   [IN ] Value expected to be current at @addr.
	 *  @new:   [IN ] Value to write to @addr.
	 *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
	 */
	int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
				unsigned long addr,
				const void *old,
				const void *new,
				unsigned int bytes,
				struct x86_exception *fault);
	void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);

	int (*pio_in_emulated)(struct x86_emulate_ctxt *ctxt,
			       int size, unsigned short port, void *val,
			       unsigned int count);

	int (*pio_out_emulated)(struct x86_emulate_ctxt *ctxt,
				int size, unsigned short port, const void *val,
				unsigned int count);

	bool (*get_segment)(struct x86_emulate_ctxt *ctxt, u16 *selector,
			    struct desc_struct *desc, u32 *base3, int seg);
	void (*set_segment)(struct x86_emulate_ctxt *ctxt, u16 selector,
			    struct desc_struct *desc, u32 base3, int seg);
	unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
						 int seg);
	void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
	void (*get_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
	void (*set_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
	void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
	ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
	int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
	void (*set_rflags)(struct x86_emulate_ctxt *ctxt, ulong val);
	int (*cpl)(struct x86_emulate_ctxt *ctxt);
	int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
	int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
	int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
	int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
	void (*halt)(struct x86_emulate_ctxt *ctxt);
	void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
	int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
	void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
	void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
	int (*intercept)(struct x86_emulate_ctxt *ctxt,
			 struct x86_instruction_info *info,
			 enum x86_intercept_stage stage);

	void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
			  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
};
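
/*
 * A caller wires the emulator to its environment by pointing
 * x86_emulate_ctxt::ops at a table of callbacks.  The (abridged) table below
 * is only an illustration with made-up example_* names; KVM's real table is
 * the emulate_ops structure in arch/x86/kvm/x86.c:
 *
 *	static const struct x86_emulate_ops example_ops = {
 *		.read_gpr	  = example_read_gpr,
 *		.write_gpr	  = example_write_gpr,
 *		.read_std	  = example_read_std,
 *		.write_std	  = example_write_std,
 *		.fetch		  = example_fetch,
 *		.read_emulated	  = example_read_emulated,
 *		.write_emulated	  = example_write_emulated,
 *		.cmpxchg_emulated = example_cmpxchg_emulated,
 *		.cpl		  = example_cpl,
 *		...
 *	};
 */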

typedef u32 __attribute__((vector_size(16))) sse128_t;

/* Type, address-of, and value of an instruction's operand. */
struct operand {
	enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
	unsigned int bytes;
	unsigned int count;
	union {
		unsigned long orig_val;
		u64 orig_val64;
	};
	union {
		unsigned long *reg;
		struct segmented_address {
			ulong ea;
			unsigned seg;
		} mem;
		unsigned xmm;
		unsigned mm;
	} addr;
	union {
		unsigned long val;
		u64 val64;
		char valptr[sizeof(unsigned long) + 2];
		sse128_t vec_val;
		u64 mm_val;
		void *data;
	};
};
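
/*
 * For example (illustrative, not lifted from the decoder): the memory
 * destination of "mov %eax, (%rdi)" in 64-bit mode would be described
 * roughly as type = OP_MEM, bytes = 4, addr.mem.ea = the value of RDI,
 * addr.mem.seg = VCPU_SREG_DS, with the data to be written back held in val.
 */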

struct fetch_cache {
	u8 data[15];
	unsigned long start;
	unsigned long end;
};

struct read_cache {
	u8 data[1024];
	unsigned long pos;
	unsigned long end;
};

/* Execution mode, passed to the emulator. */
enum x86emul_mode {
	X86EMUL_MODE_REAL,	/* Real mode.             */
	X86EMUL_MODE_VM86,	/* Virtual 8086 mode.     */
	X86EMUL_MODE_PROT16,	/* 16-bit protected mode. */
	X86EMUL_MODE_PROT32,	/* 32-bit protected mode. */
	X86EMUL_MODE_PROT64,	/* 64-bit (long) mode.    */
};

struct x86_emulate_ctxt {
	const struct x86_emulate_ops *ops;

	/* Register state before/after emulation. */
	unsigned long eflags;
	unsigned long eip; /* eip before instruction emulation */
	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
	enum x86emul_mode mode;

	/* interruptibility state, as a result of execution of STI or MOV SS */
	int interruptibility;

	bool guest_mode; /* guest running a nested guest */
	bool perm_ok; /* do not check permissions if true */
	bool only_vendor_specific_insn;

	bool have_exception;
	struct x86_exception exception;

	/* decode cache */
	u8 twobyte;
	u8 b;
	u8 intercept;
	u8 lock_prefix;
	u8 rep_prefix;
	u8 op_bytes;
	u8 ad_bytes;
	u8 rex_prefix;
	struct operand src;
	struct operand src2;
	struct operand dst;
	bool has_seg_override;
	u8 seg_override;
	u64 d;
	int (*execute)(struct x86_emulate_ctxt *ctxt);
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
	/* modrm */
	u8 modrm;
	u8 modrm_mod;
	u8 modrm_reg;
	u8 modrm_rm;
	u8 modrm_seg;
	bool rip_relative;
	unsigned long _eip;
	struct operand memop;
	u32 regs_valid;  /* bitmap of registers in _regs[] that can be read */
	u32 regs_dirty;  /* bitmap of registers in _regs[] that have been written */
	/* Fields above regs are cleared together. */
	unsigned long _regs[NR_VCPU_REGS];
	struct operand *memopp;
	struct fetch_cache fetch;
	struct read_cache io_read;
	struct read_cache mem_read;
};
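
/*
 * Illustrative sketch of how a caller seeds the context before decoding
 * (KVM's real setup is init_emulate_ctxt() in arch/x86/kvm/x86.c; the
 * example_* helpers are placeholders and real code picks among all five
 * x86emul_mode values):
 *
 *	ctxt->ops	 = &example_ops;
 *	ctxt->eflags	 = example_get_rflags(vcpu);
 *	ctxt->eip	 = example_get_rip(vcpu);
 *	ctxt->mode	 = long_mode ? X86EMUL_MODE_PROT64 : X86EMUL_MODE_PROT32;
 *	ctxt->guest_mode = example_is_nested_guest(vcpu);
 */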

/* Repeat String Operation Prefix */
#define REPE_PREFIX	0xf3
#define REPNE_PREFIX	0xf2

/* CPUID vendors */
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65

#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273

#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
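
/*
 * These are the EBX/ECX/EDX words returned by CPUID leaf 0, i.e. the vendor
 * strings "AuthenticAMD", "AMDisbetter!" and "GenuineIntel" as little-endian
 * ASCII in the usual EBX:EDX:ECX order.  Illustrative check (not the exact
 * helper KVM uses):
 *
 *	static bool example_is_genuine_intel(struct x86_emulate_ctxt *ctxt)
 *	{
 *		u32 eax = 0x00000000, ebx = 0, ecx = 0x00000000, edx = 0;
 *
 *		ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
 *		return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
 *		       ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
 *		       edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
 *	}
 */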

enum x86_intercept_stage {
	X86_ICTP_NONE = 0,   /* Allow zero-init to not match anything */
	X86_ICPT_PRE_EXCEPT,
	X86_ICPT_POST_EXCEPT,
	X86_ICPT_POST_MEMACCESS,
};

enum x86_intercept {
	x86_intercept_none,
	x86_intercept_cr_read,
	x86_intercept_cr_write,
	x86_intercept_clts,
	x86_intercept_lmsw,
	x86_intercept_smsw,
	x86_intercept_dr_read,
	x86_intercept_dr_write,
	x86_intercept_lidt,
	x86_intercept_sidt,
	x86_intercept_lgdt,
	x86_intercept_sgdt,
	x86_intercept_lldt,
	x86_intercept_sldt,
	x86_intercept_ltr,
	x86_intercept_str,
	x86_intercept_rdtsc,
	x86_intercept_rdpmc,
	x86_intercept_pushf,
	x86_intercept_popf,
	x86_intercept_cpuid,
	x86_intercept_rsm,
	x86_intercept_iret,
	x86_intercept_intn,
	x86_intercept_invd,
	x86_intercept_pause,
	x86_intercept_hlt,
	x86_intercept_invlpg,
	x86_intercept_invlpga,
	x86_intercept_vmrun,
	x86_intercept_vmload,
	x86_intercept_vmsave,
	x86_intercept_vmmcall,
	x86_intercept_stgi,
	x86_intercept_clgi,
	x86_intercept_skinit,
	x86_intercept_rdtscp,
	x86_intercept_icebp,
	x86_intercept_wbinvd,
	x86_intercept_monitor,
	x86_intercept_mwait,
	x86_intercept_rdmsr,
	x86_intercept_wrmsr,
	x86_intercept_in,
	x86_intercept_ins,
	x86_intercept_out,
	x86_intercept_outs,

	nr_x86_intercepts
};

/* Host execution mode. */
#if defined(CONFIG_X86_32)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
#elif defined(CONFIG_X86_64)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
#define EMULATION_RESTART 1
#define EMULATION_INTERCEPTED 2
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code);
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);
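
/*
 * Illustrative caller flow (a sketch, not KVM's actual
 * x86_emulate_instruction(); the handler names are placeholders).
 * EMULATION_RESTART simply asks the caller to run x86_emulate_insn() again:
 *
 *	rc = x86_decode_insn(ctxt, insn, insn_len);
 *	if (rc != EMULATION_OK)
 *		return handle_decode_failure();
 *
 * restart:
 *	rc = x86_emulate_insn(ctxt);
 *	if (rc == EMULATION_INTERCEPTED)
 *		return handled_by_nested_hypervisor();
 *	if (rc == EMULATION_FAILED)
 *		return handle_emulation_failure();
 *	if (ctxt->have_exception)
 *		inject_exception(&ctxt->exception);
 *	else if (rc == EMULATION_RESTART)
 *		goto restart;
 */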

#endif /* _ASM_X86_KVM_X86_EMULATE_H */