/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>

#define KVM_MAX_VCPUS 254
#define KVM_SOFT_MAX_VCPUS 160
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_MMIO_SIZE 16

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
				  0xFFFFFF0000000000ULL)
#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
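/*
 * These masks are meant for reserved-bit checks on guest control register
 * writes; an illustrative sketch (not the exact kvm_set_cr4() code):
 *
 *	if (cr4 & CR4_RESERVED_BITS)
 *		return 1;	// caller then injects #GP into the guest
 */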



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
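/*
 * Illustrative values, assuming the usual x86 PAGE_SHIFT of 12:
 *   KVM_HPAGE_SHIFT(1) = 12  ->  4 KiB base pages
 *   KVM_HPAGE_SHIFT(2) = 21  ->  2 MiB large pages
 *   KVM_HPAGE_SHIFT(3) = 30  ->  1 GiB huge pages
 */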

#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define MC_VECTOR 18

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;
struct kvm_async_pf;

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_CPL,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_FIXED_1	0xffff0ff0
#define DR6_VOLATILE	0x0000e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff23ff

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1
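
/*
 * Both values above are bit numbers within vcpu->arch.apic_attention;
 * an illustrative (not verbatim lapic.c) use:
 *
 *	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
 */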

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
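
/*
 * A rough sketch of how such a cache is consumed (mmu.c owns the real
 * code): it is topped up before mmu_lock is taken, after which objects
 * can be popped without any risk of failure or sleeping:
 *
 *	obj = cache->objects[--cache->nobjs];
 */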

/*
 * kvm_mmu_page_role, below, is defined as (matching the bitfield layout
 * of the union):
 *
 *   bits 0:3 - page table level for this shadow (1-4)
 *   bit    4 - guest uses PAE paging (8-byte ptes)
 *   bits 5:6 - page table quadrant for 2-level guests
 *   bits 7:12 - padding to keep the hex dump of the word readable
 *   bit   13 - direct mapping of virtual to physical at gfn,
 *              used for real mode and two-dimensional paging
 *   bits 14:16 - common access permissions for all ptes in this shadow page
 *   bits 17:20 - the invalid, nxe, cr0_wp and smep_andnot_wp flags
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
	};
};
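
/*
 * Because the bitfields share storage with 'word', a whole role can be
 * compared or hashed in a single operation; an illustrative use:
 *
 *	if (sp->role.word != role.word)
 *		continue;	// not the shadow page we are looking for
 */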

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	/*
	 * One bit set per memory slot that has pages
	 * mapped by this shadow page.
	 */
	DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
	bool unsync;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	unsigned long parent_ptes;	/* Reverse mapping for parent_pte */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	int clear_spte_count;
#endif

	int write_flooding_count;
};

struct kvm_pio_request {
	unsigned long count;
	int in;
	int port;
	int size;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level PAE with 64-bit
 * ptes, and 2-level 32-bit).  The kvm_mmu structure abstracts the details
 * of the current mmu mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
	union kvm_mmu_page_role base_role;
	bool direct_map;

	u64 *pae_root;
	u64 *lm_root;
	u64 rsvd_bits_mask[2][4];

	bool nx;

	u64 pdptrs[4]; /* pae */
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u8 version;
	struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
	struct irq_work irq_work;
	u64 reprogram_pmi;
};
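
/*
 * Note (inferred from how the PMU code indexes these fields):
 * counter_bitmask[] is indexed by enum pmc_type, i.e. [KVM_PMC_GP] holds
 * the width mask for general-purpose counters and [KVM_PMC_FIXED] the
 * one for fixed-function counters.
 */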

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two-level paging, this still
	 * saves the paging mode of the L1 guest. This context is always used
	 * to handle faults.
	 */
	struct kvm_mmu mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle L2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	struct fpu guest_fpu;
	u64 xcr0;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		bool reinject;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	unsigned int time_offset;
	struct page *time_page;

	struct {
		u64 msr_val;
		u64 last_steal;
		u64 accum_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 last_guest_tsc;
	u64 last_kernel_ns;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u8  this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */

	struct mtrr_state_type mtrr_state;
	u32 pat;

	int switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned access;
	gfn_t mmio_gfn;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	/* fields used by HYPER-V emulation */
	u64 hv_vapic;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;
};

struct kvm_lpage_info {
	unsigned long rmap_pde;
	int write_count;
};

struct kvm_arch_memory_slot {
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};

struct kvm_arch {
	unsigned int n_used_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	int iommu_flags;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	int vapics_in_nmi_mode;

	unsigned int tss_addr;
	struct page *apic_access_page;

	gpa_t wall_clock;

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u8  cur_tsc_generation;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* fields used by HYPER-V emulation */
	u64 hv_guest_os_id;
	u64 hv_hypercall;

	#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
	#endif
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 mmu_unsync;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
	u32 irq_injections;
	u32 nmi_injections;
};

struct x86_instruction_info;

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void *dummy);
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
				struct kvm_guest_debug *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	void (*fpu_activate)(struct kvm_vcpu *vcpu);
	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

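/*
 * Thin wrappers around the ->adjust_tsc_offset() hook: the 'host' flag
 * tells the backend whether 'adjustment' is expressed in host cycles
 * (which it may still need to scale to the guest's TSC rate) or already
 * in guest cycles.
 */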
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			  const void *val, int bytes);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* minimum supported tsc_khz for guests */
extern u32  kvm_min_guest_tsc_khz;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;

enum emulation_result {
	EMULATE_DONE,       /* no further processing */
	EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
	EMULATE_FAIL,       /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_RETRY		    (1 << 3)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
			    int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
			int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);

int kvm_pic_set_irq(void *opaque, int irq, int level);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int fx_init(struct kvm_vcpu *vcpu);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);

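/*
 * Identity translation, used as the ->translate_gpa hook when there is no
 * second level of translation (no L2 guest) and a gpa therefore needs no
 * further conversion.
 */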
static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
	return gpa;
}

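/*
 * Recover the kvm_mmu_page that owns a shadow page table page; this works
 * because mmu.c points page_private() of every spt page back at its
 * kvm_mmu_page header when the page is allocated.
 */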
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
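/*
 * The extra byte in RMODE_TSS_SIZE is the terminating 0xff byte that the
 * architecture expects after the I/O permission bitmap.
 */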

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);
extern bool kvm_rebooting;

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
	"666: " insn "\n\t" \
	"668: \n\t"                           \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	cleanup_insn "\n\t"		      \
	"cmpb $0, kvm_rebooting \n\t"	      \
	"jne 668b \n\t"      		      \
	__ASM_SIZE(push) " $666b \n\t"	      \
	"call kvm_spurious_fault \n\t"	      \
	".popsection \n\t" \
	_ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)		\
	____kvm_handle_fault_on_reboot(insn, "")
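
/*
 * Illustrative use (a sketch, not copied from vmx.c/svm.c): wrapping a
 * virtualization instruction so that a fault caused by a concurrent
 * reboot is swallowed instead of crashing the host:
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff") : : : "cc");
 */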

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);

void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu);

#endif /* _ASM_X86_KVM_HOST_H */