/******************************************************************************
 * x86_emulate.h
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#ifndef _ASM_X86_KVM_X86_EMULATE_H
#define _ASM_X86_KVM_X86_EMULATE_H

#include <asm/desc_defs.h>

struct x86_emulate_ctxt;
enum x86_intercept;
enum x86_intercept_stage;

struct x86_exception {
	u8 vector;
	bool error_code_valid;
	u16 error_code;
	bool nested_page_fault;
	u64 address; /* cr2 or nested page fault gpa */
};

/*
 * This struct is used to carry enough information from the instruction
 * decoder to main KVM so that a decision can be made whether the
 * instruction needs to be intercepted or not.
 */
struct x86_instruction_info {
	u8  intercept;          /* which intercept                      */
	u8  rep_prefix;         /* rep prefix?                          */
	u8  modrm_mod;		/* mod part of modrm			*/
	u8  modrm_reg;          /* index of register used               */
	u8  modrm_rm;		/* rm part of modrm			*/
	u64 src_val;            /* value of source operand              */
	u8  src_bytes;          /* size of source operand               */
	u8  dst_bytes;          /* size of destination operand          */
	u8  ad_bytes;           /* size of src/dst address              */
	u64 next_rip;           /* rip following the instruction        */
};
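
/*
 * Illustrative sketch, not part of the original header: a nested-
 * virtualization backend receives this struct through the emulator's
 * ->intercept() callback (declared below) and uses it to decide whether the
 * guest hypervisor asked for the instruction to be intercepted.  The helper
 * name here is hypothetical:
 *
 *	static int example_check_intercept(struct kvm_vcpu *vcpu,
 *					   struct x86_instruction_info *info,
 *					   enum x86_intercept_stage stage)
 *	{
 *		if (info->intercept == x86_intercept_rdtsc &&
 *		    stage == X86_ICPT_PRE_EXCEPT)
 *			return X86EMUL_INTERCEPTED;
 *		return X86EMUL_CONTINUE;
 *	}
 */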

/*
 * x86_emulate_ops:
 *
 * These operations represent the instruction emulator's interface to memory.
 * There are two categories of operation: those that act on ordinary memory
 * regions (*_std), and those that act on memory regions known to require
 * special treatment or emulation (*_emulated).
 *
 * The emulator assumes that an instruction accesses only one 'emulated memory'
 * location, that this location is the given linear faulting address (cr2), and
 * that this is one of the instruction's data operands. Instruction fetches and
 * stack operations are assumed never to access emulated memory. The emulator
 * automatically deduces which operand of a string-move operation is accessing
 * emulated memory, and assumes that the other operand accesses normal memory.
 *
 * NOTES:
 *  1. The emulator isn't very smart about emulated vs. standard memory.
 *     'Emulated memory' access addresses should be checked for sanity.
 *     'Normal memory' accesses may fault, and the caller must arrange to
 *     detect and handle reentrancy into the emulator via recursive faults.
 *     Accesses may be unaligned and may cross page boundaries.
 *  2. If the access fails (cannot emulate, or a standard access faults) then
 *     it is up to the memop to propagate the fault to the guest VM via
 *     some out-of-band mechanism, unknown to the emulator. The memop signals
 *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
 *     then immediately bail.
 *  3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
 *     cmpxchg8b_emulated needs to support 8-byte accesses.
 *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
 */
/* Access completed successfully: continue emulation as normal. */
#define X86EMUL_CONTINUE        0
/* Access is unhandleable: bail from emulation and return error to caller. */
#define X86EMUL_UNHANDLEABLE    1
/* Terminate emulation but return success to the caller. */
#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
#define X86EMUL_RETRY_INSTR     3 /* retry the instruction for some reason */
#define X86EMUL_CMPXCHG_FAILED  4 /* cmpxchg did not see expected value */
#define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
#define X86EMUL_INTERCEPTED     6 /* Intercepted by nested VMCB/VMCS */
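
/*
 * Illustrative sketch, not part of the original header: on a faulting access
 * a memop is expected to fill in its struct x86_exception argument and return
 * X86EMUL_PROPAGATE_FAULT; on success it returns X86EMUL_CONTINUE.  The
 * helper names below are hypothetical:
 *
 *	static int example_read_std(unsigned long addr, void *val,
 *				    unsigned int bytes, struct kvm_vcpu *vcpu,
 *				    struct x86_exception *fault)
 *	{
 *		if (example_copy_from_guest(vcpu, val, addr, bytes) < 0) {
 *			fault->vector = PF_VECTOR;
 *			fault->error_code_valid = true;
 *			fault->error_code = 0;
 *			fault->address = addr;
 *			return X86EMUL_PROPAGATE_FAULT;
 *		}
 *		return X86EMUL_CONTINUE;
 *	}
 */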

struct x86_emulate_ops {
	/*
	 * read_std: Read bytes of standard (non-emulated/special) memory.
	 *           Used for descriptor reading.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*read_std)(unsigned long addr, void *val,
			unsigned int bytes, struct kvm_vcpu *vcpu,
			struct x86_exception *fault);

	/*
	 * write_std: Write bytes of standard (non-emulated/special) memory.
	 *            Used for descriptor writing.
	 *  @addr:  [IN ] Linear address to which to write.
	 *  @val:   [IN ] Value to write to memory.
	 *  @bytes: [IN ] Number of bytes to write to memory.
	 */
	int (*write_std)(unsigned long addr, void *val,
			 unsigned int bytes, struct kvm_vcpu *vcpu,
			 struct x86_exception *fault);
	/*
	 * fetch: Read bytes of standard (non-emulated/special) memory.
	 *        Used for instruction fetch.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*fetch)(unsigned long addr, void *val,
		     unsigned int bytes, struct kvm_vcpu *vcpu,
		     struct x86_exception *fault);

	/*
	 * read_emulated: Read bytes from emulated/special memory area.
	 *  @addr:  [IN ] Linear address from which to read.
	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 *  @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*read_emulated)(unsigned long addr,
			     void *val,
			     unsigned int bytes,
			     struct x86_exception *fault,
			     struct kvm_vcpu *vcpu);

	/*
	 * write_emulated: Write bytes to emulated/special memory area.
	 *  @addr:  [IN ] Linear address to which to write.
	 *  @val:   [IN ] Value to write to memory (low-order bytes used as
	 *                required).
	 *  @bytes: [IN ] Number of bytes to write to memory.
	 */
	int (*write_emulated)(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct x86_exception *fault,
			      struct kvm_vcpu *vcpu);

	/*
	 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
	 *                   emulated/special memory area.
	 *  @addr:  [IN ] Linear address to access.
	 *  @old:   [IN ] Value expected to be current at @addr.
	 *  @new:   [IN ] Value to write to @addr.
	 *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
	 */
	int (*cmpxchg_emulated)(unsigned long addr,
				const void *old,
				const void *new,
				unsigned int bytes,
				struct x86_exception *fault,
				struct kvm_vcpu *vcpu);

	int (*pio_in_emulated)(int size, unsigned short port, void *val,
			       unsigned int count, struct kvm_vcpu *vcpu);

	int (*pio_out_emulated)(int size, unsigned short port, const void *val,
				unsigned int count, struct kvm_vcpu *vcpu);

	bool (*get_cached_descriptor)(struct desc_struct *desc, u32 *base3,
				      int seg, struct kvm_vcpu *vcpu);
	void (*set_cached_descriptor)(struct desc_struct *desc, u32 base3,
				      int seg, struct kvm_vcpu *vcpu);
	u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
	void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
	unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);
	void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
	void (*get_idt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
	ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
	int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
	int (*cpl)(struct kvm_vcpu *vcpu);
	int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
	int (*set_dr)(int dr, unsigned long value, struct kvm_vcpu *vcpu);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
	void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
	int (*intercept)(struct kvm_vcpu *vcpu,
			 struct x86_instruction_info *info,
			 enum x86_intercept_stage stage);
};
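
/*
 * Illustrative sketch, not part of the original header: KVM supplies an
 * x86_emulate_ops table of its own callbacks and hangs it off the emulation
 * context; the emulator then reaches guest memory and registers only through
 * that table.  The callback names here are hypothetical:
 *
 *	static struct x86_emulate_ops example_emulate_ops = {
 *		.read_std       = example_read_std,
 *		.write_std      = example_write_std,
 *		.fetch          = example_fetch,
 *		.read_emulated  = example_read_emulated,
 *		.write_emulated = example_write_emulated,
 *	};
 *
 *	ctxt->ops = &example_emulate_ops;
 *	rc = ctxt->ops->read_std(addr, &data, sizeof(data), ctxt->vcpu,
 *				 &ctxt->exception);
 */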

typedef u32 __attribute__((vector_size(16))) sse128_t;

/* Type, address-of, and value of an instruction's operand. */
struct operand {
	enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_NONE } type;
	unsigned int bytes;
	union {
		unsigned long orig_val;
		u64 orig_val64;
	};
	union {
		unsigned long *reg;
		struct segmented_address {
			ulong ea;
			unsigned seg;
		} mem;
		unsigned xmm;
	} addr;
	union {
		unsigned long val;
		u64 val64;
		char valptr[sizeof(unsigned long) + 2];
		sse128_t vec_val;
	};
};
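
/*
 * Illustrative sketch, not part of the original header: emulator code
 * generally dispatches on op->type to find where an operand's value lives.
 * The helper name is hypothetical:
 *
 *	static unsigned long example_operand_value(struct operand *op)
 *	{
 *		switch (op->type) {
 *		case OP_REG:
 *			return *op->addr.reg;
 *		case OP_MEM:
 *		case OP_IMM:
 *			return op->val;
 *		default:
 *			return 0;
 *		}
 *	}
 */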

struct fetch_cache {
	u8 data[15];
	unsigned long start;
	unsigned long end;
};

struct read_cache {
	u8 data[1024];
	unsigned long pos;
	unsigned long end;
};

struct decode_cache {
	u8 twobyte;
	u8 b;
	u8 intercept;
	u8 lock_prefix;
	u8 rep_prefix;
	u8 op_bytes;
	u8 ad_bytes;
	u8 rex_prefix;
	struct operand src;
	struct operand src2;
	struct operand dst;
	bool has_seg_override;
	u8 seg_override;
	unsigned int d;
	int (*execute)(struct x86_emulate_ctxt *ctxt);
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
	unsigned long regs[NR_VCPU_REGS];
	unsigned long eip;
	/* modrm */
	u8 modrm;
	u8 modrm_mod;
	u8 modrm_reg;
	u8 modrm_rm;
	u8 modrm_seg;
	bool rip_relative;
	struct fetch_cache fetch;
	struct read_cache io_read;
	struct read_cache mem_read;
};

struct x86_emulate_ctxt {
	struct x86_emulate_ops *ops;

	/* Register state before/after emulation. */
	struct kvm_vcpu *vcpu;

	unsigned long eflags;
	unsigned long eip; /* eip before instruction emulation */
	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
	int mode;
	u32 cs_base;

	/* interruptibility state, as a result of execution of STI or MOV SS */
	int interruptibility;

	bool guest_mode; /* guest running a nested guest */
	bool perm_ok; /* do not check permissions if true */
	bool only_vendor_specific_insn;

	bool have_exception;
	struct x86_exception exception;

	/* decode cache */
	struct decode_cache decode;
};

/* Repeat String Operation Prefix */
#define REPE_PREFIX	0xf3
#define REPNE_PREFIX	0xf2

/* Execution mode, passed to the emulator. */
#define X86EMUL_MODE_REAL     0	/* Real mode.             */
#define X86EMUL_MODE_VM86     1	/* Virtual 8086 mode.     */
#define X86EMUL_MODE_PROT16   2	/* 16-bit protected mode. */
#define X86EMUL_MODE_PROT32   4	/* 32-bit protected mode. */
#define X86EMUL_MODE_PROT64   8	/* 64-bit (long) mode.    */

/* any protected mode   */
#define X86EMUL_MODE_PROT     (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
			       X86EMUL_MODE_PROT64)
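
/*
 * Illustrative sketch, not part of the original header: the mode values are
 * distinct bits, so X86EMUL_MODE_PROT works as a mask for "any protected
 * mode", while a single mode is tested with a plain comparison (helper names
 * hypothetical):
 *
 *	if (ctxt->mode & X86EMUL_MODE_PROT)
 *		example_handle_protected_mode(ctxt);
 *	if (ctxt->mode == X86EMUL_MODE_PROT64)
 *		example_handle_long_mode(ctxt);
 */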

enum x86_intercept_stage {
	X86_ICPT_PRE_EXCEPT,
	X86_ICPT_POST_EXCEPT,
	X86_ICPT_POST_MEMACCESS,
};

enum x86_intercept {
	x86_intercept_none,
	x86_intercept_cr_read,
	x86_intercept_cr_write,
	x86_intercept_clts,
	x86_intercept_lmsw,
	x86_intercept_smsw,
	x86_intercept_lidt,
	x86_intercept_sidt,
	x86_intercept_lgdt,
	x86_intercept_sgdt,
	x86_intercept_lldt,
	x86_intercept_sldt,
	x86_intercept_ltr,
	x86_intercept_str,
	x86_intercept_rdtsc,
	x86_intercept_rdpmc,
	x86_intercept_pushf,
	x86_intercept_popf,
	x86_intercept_cpuid,
	x86_intercept_rsm,
	x86_intercept_iret,
	x86_intercept_intn,
	x86_intercept_invd,
	x86_intercept_pause,
	x86_intercept_hlt,
	x86_intercept_invlpg,
	x86_intercept_invlpga,
	x86_intercept_vmrun,
	x86_intercept_vmload,
	x86_intercept_vmsave,
	x86_intercept_vmmcall,
	x86_intercept_stgi,
	x86_intercept_clgi,
	x86_intercept_skinit,
	x86_intercept_rdtscp,
	x86_intercept_icebp,
	x86_intercept_wbinvd,
	x86_intercept_monitor,
	x86_intercept_mwait,

	nr_x86_intercepts
};

/* Host execution mode. */
#if defined(CONFIG_X86_32)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
#elif defined(CONFIG_X86_64)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
#define EMULATION_RESTART 1
#define EMULATION_INTERCEPTED 2
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code);
int emulate_int_real(struct x86_emulate_ctxt *ctxt,
		     struct x86_emulate_ops *ops, int irq);
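
/*
 * Illustrative sketch, not part of the original header: a caller decodes
 * first and only emulates on success, then acts on the EMULATION_* result.
 * Rough flow; the failure/restart handlers are hypothetical:
 *
 *	rc = x86_decode_insn(ctxt, insn, insn_len);
 *	if (rc == EMULATION_OK)
 *		rc = x86_emulate_insn(ctxt);
 *	if (rc == EMULATION_FAILED)
 *		example_report_failure(ctxt->vcpu);
 *	else if (rc == EMULATION_RESTART)
 *		example_reenter_guest_and_retry(ctxt->vcpu);
 */
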
#endif /* _ASM_X86_KVM_X86_EMULATE_H */