/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>

#define KVM_MAX_VCPUS 254
#define KVM_SOFT_MAX_VCPUS 160
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_MMIO_SIZE 16

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

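/*
 * Each of the CR*_RESERVED_BITS masks below is the complement of the bits
 * architecturally defined for that register; the kvm_set_cr*() helpers
 * reject guest writes that set any masked bit (the caller then injects #GP).
 */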
#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_PCID_ENABLED_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
				  0xFFFFFF0000000000ULL)
#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
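/*
 * Worked out for the three sizes above (PAGE_SHIFT == 12 on x86):
 *   x = 1: shift 12 -> 4 KiB pages,        1 base page  per "hugepage"
 *   x = 2: shift 21 -> 2 MiB pages,      512 base pages per hugepage
 *   x = 3: shift 30 -> 1 GiB pages,   262144 base pages per hugepage
 */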

#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define MC_VECTOR 18

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

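/*
 * By default the MMU may use up to 20 per mille (2%) of a guest's memory
 * pages for shadow pages, but never fewer than KVM_MIN_ALLOC_MMU_PAGES;
 * see kvm_mmu_calculate_mmu_pages(), declared below.
 */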
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;
struct kvm_async_pf;

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_CPL,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_FIXED_1	0xffff0ff0
#define DR6_VOLATILE	0x0000e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff23ff
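/*
 * In DR6/DR7 the FIXED_1 bits read as one on real hardware, while the
 * VOLATILE masks cover the bits a guest can actually change and that
 * therefore have to be tracked per vcpu.
 */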

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
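/*
 * A rough sketch of the intended use (the real helpers live in
 * arch/x86/kvm/mmu.c; the names below are illustrative only):
 *
 *	while (cache->nobjs < ARRAY_SIZE(cache->objects))	// top up early,
 *		cache->objects[cache->nobjs++] = alloc();	// may sleep
 *	...
 *	obj = cache->objects[--cache->nobjs];	// fault path: cannot fail
 */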

/*
 * kvm_mmu_page_role, below, is defined as (bit numbers follow the
 * declaration order of the bitfield):
 *
 *   bits 0:3 - level of the shadow page in the paging hierarchy
 *   bit    4 - guest page tables are 64-bit (PAE or long mode)
 *   bits 5:6 - page table quadrant for 2-level guests
 *   bits 7:12 - padding for readable hex output
 *   bit   13 - direct mapping of virtual to physical mapping at gfn
 *              used for real mode and two-dimensional paging
 *   bits 14:16 - common access permissions for all ptes in this shadow page
 *   bits 17:20 - the invalid, nxe, cr0_wp and smep_andnot_wp flags
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
	};
};
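/*
 * Because every distinguishing attribute is packed into the single
 * "word", shadow-page lookup can compare roles in one go, e.g.:
 *
 *	if (sp->gfn == gfn && sp->role.word == role.word)
 *		return sp;	// an existing shadow page can be reused
 */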

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	/*
	 * One bit set per slot which has memory
	 * in this shadow page.
	 */
	DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
	bool unsync;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	unsigned long parent_ptes;	/* Reverse mapping for parent_pte */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	int clear_spte_count;
#endif

	int write_flooding_count;
};

struct kvm_pio_request {
	unsigned long count;
	int in;
	int port;
	int size;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
	union kvm_mmu_page_role base_role;
	bool direct_map;

	u64 *pae_root;
	u64 *lm_root;
	u64 rsvd_bits_mask[2][4];

	bool nx;

	u64 pdptrs[4]; /* pae */
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u8 version;
	struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
	struct irq_work irq_work;
	u64 reprogram_pmi;
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two-level paging, this still
	 * saves the paging mode of the L1 guest. This context is always used
	 * to handle faults.
	 */
	struct kvm_mmu mmu;

	/*
	 * Paging state of an L2 guest (used for nested NPT)
	 *
	 * This context will save all necessary information to walk page
	 * tables of an L2 guest. This context is only initialized for page
	 * table walking and not for faulting since we never handle L2 page
	 * faults on the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	struct fpu guest_fpu;
	u64 xcr0;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		bool reinject;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	unsigned int time_offset;
	struct page *time_page;

	struct {
		u64 msr_val;
		u64 last_steal;
		u64 accum_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 last_guest_tsc;
	u64 last_kernel_ns;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u8  this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */

	struct mtrr_state_type mtrr_state;
	u32 pat;

	int switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned access;
	gfn_t mmio_gfn;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	/* fields used by HYPER-V emulation */
	u64 hv_vapic;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;
};

struct kvm_lpage_info {
	unsigned long rmap_pde;
	int write_count;
};

struct kvm_arch_memory_slot {
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
};

struct kvm_arch {
	unsigned int n_used_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	int iommu_flags;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	int vapics_in_nmi_mode;

	unsigned int tss_addr;
	struct page *apic_access_page;

	gpa_t wall_clock;

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u8  cur_tsc_generation;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* fields used by HYPER-V emulation */
	u64 hv_guest_os_id;
	u64 hv_hypercall;

	#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
	#endif
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 mmu_unsync;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
	u32 irq_injections;
	u32 nmi_injections;
};

struct x86_instruction_info;

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void *dummy);
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
				struct kvm_guest_debug *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	void (*fpu_activate)(struct kvm_vcpu *vcpu);
	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);
	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			  const void *val, int bytes);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* minimum supported tsc_khz for guests */
extern u32  kvm_min_guest_tsc_khz;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with mmio request */
	EMULATE_FAIL,         /* can't emulate this instruction */
};

760 761
#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
762
#define EMULTYPE_SKIP		    (1 << 2)
763
#define EMULTYPE_RETRY		    (1 << 3)
764 765
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
			    int emulation_type, void *insn, int insn_len);
766 767 768 769

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
			int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}

void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);

int kvm_pic_set_irq(void *opaque, int irq, int level);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int fx_init(struct kvm_vcpu *vcpu);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);

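/*
 * Identity translation; this is the default ->translate_gpa used when the
 * vcpu is not using nested paging (compare translate_nested_gpa() above).
 */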
static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
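/*
 * RMODE_TSS_SIZE works out to 0x2089 bytes: the 0x68-byte base TSS, the
 * interrupt redirection bitmap, the I/O permission bitmap, plus the
 * final 0xff byte that must terminate the I/O bitmap.
 */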

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);
extern bool kvm_rebooting;

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
	"666: " insn "\n\t" \
	"668: \n\t"                           \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	cleanup_insn "\n\t"		      \
	"cmpb $0, kvm_rebooting \n\t"	      \
	"jne 668b \n\t"      		      \
	__ASM_SIZE(push) " $666b \n\t"	      \
	"call kvm_spurious_fault \n\t"	      \
	".popsection \n\t" \
	_ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)		\
	____kvm_handle_fault_on_reboot(insn, "")
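/*
 * Typical use: wrap a hardware virtualization instruction so that a fault
 * during host reboot lands in the fixup above instead of oopsing.  vmx.c,
 * for instance, builds an __ex() helper on top of this, roughly:
 *
 *	#define __ex(insn) __kvm_handle_fault_on_reboot(insn)
 *	asm volatile(__ex("vmxoff"));
 */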

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);

void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu);

#endif /* _ASM_X86_KVM_HOST_H */