/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_CR3		KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
#define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
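
/*
 * Illustrative sketch, not part of the upstream header: these masks are
 * written so that "any reserved bit set" rejects the value with a single
 * AND, roughly as the CR setters do:
 *
 *	if (cr4 & CR4_RESERVED_BITS)
 *		return 1;
 *
 * with the caller injecting #GP on a non-zero return.
 */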



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
enum {
	PT_PAGE_TABLE_LEVEL   = 1,
	PT_DIRECTORY_LEVEL    = 2,
	PT_PDPE_LEVEL         = 3,
	/* set max level to the biggest one */
	PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
};
#define KVM_NR_PAGE_SIZES	(PT_MAX_HUGEPAGE_LEVEL - \
				 PT_PAGE_TABLE_LEVEL + 1)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
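
/*
 * Worked example (derived from the macros above, assuming the usual 4 KiB
 * base page, i.e. PAGE_SHIFT == 12):
 *
 *	KVM_HPAGE_GFN_SHIFT(2) == 9	KVM_HPAGE_SIZE(2) == 2 MiB
 *	KVM_HPAGE_GFN_SHIFT(3) == 18	KVM_HPAGE_SIZE(3) == 1 GiB
 *	KVM_PAGES_PER_HPAGE(2) == 512	KVM_PAGES_PER_HPAGE(3) == 512 * 512
 */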

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
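
/*
 * Illustrative example (not upstream code): at PT_DIRECTORY_LEVEL this
 * reduces to (gfn >> 9) - (base_gfn >> 9), i.e. the index of the 2 MiB
 * region containing @gfn relative to the one containing @base_gfn.
 */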

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = __VCPU_REGS_RAX,
	VCPU_REGS_RCX = __VCPU_REGS_RCX,
	VCPU_REGS_RDX = __VCPU_REGS_RDX,
	VCPU_REGS_RBX = __VCPU_REGS_RBX,
	VCPU_REGS_RSP = __VCPU_REGS_RSP,
	VCPU_REGS_RBP = __VCPU_REGS_RBP,
	VCPU_REGS_RSI = __VCPU_REGS_RSI,
	VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8  = __VCPU_REGS_R8,
	VCPU_REGS_R9  = __VCPU_REGS_R9,
	VCPU_REGS_R10 = __VCPU_REGS_R10,
	VCPU_REGS_R11 = __VCPU_REGS_R11,
	VCPU_REGS_R12 = __VCPU_REGS_R12,
	VCPU_REGS_R13 = __VCPU_REGS_R13,
	VCPU_REGS_R14 = __VCPU_REGS_R14,
	VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_BT		(1 << 15)
#define DR6_RTM		(1 << 16)
#define DR6_FIXED_1	0xfffe0ff0
#define DR6_INIT	0xffff0ff0
#define DR6_VOLATILE	0x0001e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

/*
 * The pages used as guest page tables by the soft MMU are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
 * by an indirect shadow page cannot be more than 15 bits.
 *
 * Currently, 14 bits are used: @level, @gpte_is_8_bytes, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;
		unsigned gpte_is_8_bytes:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned :6;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};
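
/*
 * Illustrative sketch, not part of the upstream header (the helper name is
 * hypothetical): because @smm occupies the top byte of the word, the
 * address-space id falls out of a single shift, which is what
 * kvm_memslots_for_spte_role relies on.
 */
static inline unsigned int kvm_mmu_role_smm_sketch(union kvm_mmu_page_role role)
{
	return role.word >> 24;	/* same value as role.smm */
}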

/*
 * This structure complements kvm_mmu_page_role, caching everything needed for
 * MMU configuration. If nothing in both these structures changed, MMU
 * re-configuration can be skipped. @valid bit is set on first usage so we don't
 * treat an all-zero structure as valid data.
 */
union kvm_mmu_extended_role {
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr0_pg:1;
		unsigned int cr4_pae:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_la57:1;
		unsigned int maxphyaddr:6;
	};
};

union kvm_mmu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
		union kvm_mmu_extended_role ext;
	};
};

struct kvm_rmap_head {
	unsigned long val;
};

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page.  */
	atomic_t write_flooding_count;
};

struct kvm_pio_request {
	unsigned long linear_rip;
	unsigned long count;
	int in;
	int port;
	int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
	u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
	gpa_t cr3;
	hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
	((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	gpa_t root_cr3;
	union kvm_mmu_role mmu_role;
	u8 root_level;
	u8 shadow_root_level;
	u8 ept_ad;
	bool direct_map;
	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];
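
	/*
	 * Illustrative sketch, not upstream code: a permission check picks
	 * the byte with PFEC bits [4:1] and tests the bit for the pte's
	 * ACC_* permissions, roughly:
	 *
	 *	fault = (permissions[(pfec >> 1) & 0xf] >> pte_access) & 1;
	 */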

	/*
	* The pkru_mask indicates if protection key checks are needed.  It
	* consists of 16 domains indexed by page fault error code bits [4:1],
	* with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	* Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	*/
	u32 pkru_mask;

	u64 *pae_root;
	u64 *lm_root;

	/*
	 * check zero bits on shadow page table entries, these
	 * bits include not only hardware reserved bits but also
	 * the bits spte never used.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	/* Can have large pages at levels 2..last_nonleaf_level-1. */
	u8 last_nonleaf_level;

	bool nx;

	u64 pdptrs[4]; /* pae */
};

struct kvm_tlb_range {
	u64 start_gfn;
	u64 pages;
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 global_ovf_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	u64 reprogram_pmi;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
	KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	union hv_stimer_config config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC)*/
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
	bool dont_zero_synic_pages;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	u32 vp_index;
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	cpumask_t tlb_flush;
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool apicv_active;
	bool load_eoi_exitmap_pending;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	u64 smi_count;
	bool tpr_access_reporting;
	u64 ia32_xss;
	u64 microcode_version;
	u64 arch_capabilities;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest. This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu *mmu;

	/* Non-nested MMU for L1 */
	struct kvm_mmu root_mmu;

	/* L1 MMU when running nested */
	struct kvm_mmu guest_mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle L2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	/*
	 * QEMU userspace and the guest each have their own FPU state.
	 * In vcpu_run, we switch between the user and guest FPU contexts.
	 * While running a VCPU, the VCPU thread will have the guest FPU
	 * context.
	 *
	 * Note that while the PKRU state lives inside the fpu registers,
	 * it is switched out separately at VMENTER and VMEXIT time. The
	 * "guest_fpu" state here contains the guest FPU context, with the
	 * host PKRU bits.
	 */
	struct fpu *user_fpu;
	struct fpu *guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool injected;
		bool has_error_code;
		u8 nr;
		u32 error_code;
		unsigned long payload;
		bool has_payload;
		u8 nested_apf;
	} exception;

	struct kvm_queued_interrupt {
		bool injected;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	int maxphyaddr;

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 tsc_offset;
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 msr_ia32_power_ctl;
	u64 tsc_scaling_ratio;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */
	bool smi_pending;    /* SMI queued after currently running handler */

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned mmio_access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	struct kvm_vcpu_hv hyperv;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
		u32 host_apf_reason;
		unsigned long nested_apf_token;
		bool delivery_as_pf_vmexit;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	u64 msr_kvm_poll_control;

	/*
	 * Indicates whether the access faulted on its guest page table;
	 * set when fixing a page fault and used to detect unhandleable
	 * instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

	/* GPA available */
	bool gpa_available;
	gpa_t gpa_val;

	/* set if the vCPU was preempted while in kernel mode (CPL = 0) */
	bool preempted_in_kernel;

	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
	bool l1tf_flush_l1d;

	/* AMD MSRC001_0015 Hardware Configuration */
	u64 msr_hwcr;
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};
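
/*
 * Illustrative sketch, not upstream code: in xAPIC flat mode a single-bit
 * logical ID selects its entry in the map roughly as
 *
 *	struct kvm_lapic *dst = map->xapic_flat_map[ffs(ldr & 0xff) - 1];
 *
 * while cluster mode splits the ID into a cluster index and a 4-bit mask.
 */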

/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	HV_REFERENCE_TSC_PAGE tsc_ref;

	struct idr conn_to_evt;

	u64 hv_reenlightenment_control;
	u64 hv_tsc_emulation_control;
	u64 hv_tsc_emulation_status;

	/* How many vCPUs have VP index != vCPU index */
	atomic_t num_mismatched_vp_indexes;

	struct hv_partition_assist_pg *hv_pa_pg;
};

enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};

struct kvm_arch {
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	unsigned long n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	u8 mmu_valid_gen;
	/* Hash table of struct kvm_mmu_page. */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	bool apic_access_page_done;

	gpa_t wall_clock;

	bool mwait_in_guest;
	bool hlt_in_guest;
	bool pause_in_guest;
	bool cstate_in_guest;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;

	#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
	#endif

	bool backwards_tsc_observed;
	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;

	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
	bool exception_payload_enabled;

	struct kvm_pmu_event_filter *pmu_event_filter;
};

struct kvm_vm_stat {
	ulong mmu_shadow_zapped;
	ulong mmu_pte_write;
	ulong mmu_pte_updated;
	ulong mmu_pde_zapped;
	ulong mmu_flooded;
	ulong mmu_recycled;
	ulong mmu_cache_miss;
	ulong mmu_unsync;
	ulong remote_tlb_flush;
	ulong lpages;
	ulong max_mmu_page_hash_collisions;
};

struct kvm_vcpu_stat {
	u64 pf_fixed;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 l1d_flush;
	u64 halt_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
	u64 req_event;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_processor_compatibility)(void);/* __init */
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*has_emulated_msr)(int index);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	struct kvm *(*vm_alloc)(void);
	void (*vm_free)(struct kvm *);
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
	int  (*tlb_remote_flush)(struct kvm *kvm);
	int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
			struct kvm_tlb_range *range);

	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */
	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*get_enable_apicv)(struct kvm_vcpu *vcpu);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
	/* Returns actual tsc_offset set in active VMCS */
	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
	bool (*mpx_supported)(void);
	bool (*xsaves_supported)(void);
	bool (*umip_emulated)(void);
	bool (*pt_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

	/*
	 * Arch-specific dirty logging hooks. These hooks are only supposed to
	 * be valid if the specific arch has hardware-accelerated dirty logging
	 * mechanism. Currently only for PML on VMX.
	 *
	 *  - slot_enable_log_dirty:
	 *	called when enabling log dirty mode for the slot.
	 *  - slot_disable_log_dirty:
	 *	called when disabling log dirty mode for the slot.
	 *	also called when slot is created with log dirty disabled.
	 *  - flush_log_dirty:
	 *	called before reporting dirty_bitmap to userspace.
	 *  - enable_log_dirty_pt_masked:
	 *	called when reenabling log dirty for the GFNs in the mask after
	 *	corresponding bits are cleared in slot->dirty_bitmap.
	 */
	void (*slot_enable_log_dirty)(struct kvm *kvm,
				      struct kvm_memory_slot *slot);
	void (*slot_disable_log_dirty)(struct kvm *kvm,
				       struct kvm_memory_slot *slot);
	void (*flush_log_dirty)(struct kvm *kvm);
	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu);

	/* pmu operations of sub-arch */
	const struct kvm_pmu_ops *pmu_ops;

	/*
	 * Architecture specific hooks for vCPU blocking due to
	 * HLT instruction.
	 * Returns for .pre_block():
	 *    - 0 means continue to block the vCPU.
	 *    - 1 means we cannot block the vCPU since some event
	 *        happens during this period, such as, 'ON' bit in
	 *        posted-interrupts descriptor is set.
	 */
	int (*pre_block)(struct kvm_vcpu *vcpu);
	void (*post_block)(struct kvm_vcpu *vcpu);

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);

	int (*get_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				unsigned user_data_size);
	int (*set_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state);
	bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);

	int (*smi_allowed)(struct kvm_vcpu *vcpu);
	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
	int (*enable_smi_window)(struct kvm_vcpu *vcpu);

	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);

	int (*get_msr_feature)(struct kvm_msr_entry *entry);

	int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
				   uint16_t *vmcs_version);
	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);

	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);

	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;
extern struct kmem_cache *x86_fpu_cache;

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kvm_x86_ops->vm_alloc();
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	return kvm_x86_ops->vm_free(kvm);
}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	if (kvm_x86_ops->tlb_remote_flush &&
	    !kvm_x86_ops->tlb_remote_flush(kvm))
		return 0;
	else
		return -ENOTSUPP;
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			  const void *val, int bytes);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8   kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64  kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64  kvm_default_tsc_scaling_ratio;
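
/*
 * Worked example (assuming VMX's 48 fractional bits): a guest that should
 * see half the host TSC frequency uses a ratio of 1ULL << 47, i.e. 0.5 in
 * fixed point; kvm_default_tsc_scaling_ratio (1ULL << 48) encodes 1.0.
 */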

extern u64 kvm_mce_cap_supported;

/*
 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
 *			userspace I/O) to indicate that the emulation context
 *			should be reused as is, i.e. skip initialization of
 *			emulation context, instruction fetch and decode.
 *
 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
 *		      Indicates that only select instructions (tagged with
 *		      EmulateOnUD) should be emulated (to minimize the emulator
 *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
 *
 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
 *		   decode the instruction length.  For use *only* by
 *		   kvm_x86_ops->skip_emulated_instruction() implementations.
 *
 * EMULTYPE_ALLOW_RETRY - Set when the emulator should resume the guest to
 *			  retry native execution under certain conditions.
 *
 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
 *			     triggered by KVM's magic "force emulation" prefix,
 *			     which is opt in via module param (off by default).
 *			     Bypasses EmulateOnUD restriction despite emulating
 *			     due to an intercepted #UD (see EMULTYPE_TRAP_UD).
 *			     Used to test the full emulator from userspace.
 *
 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
 *			backdoor emulation, which is opt in via module param.
 *			VMware backdoor emulation handles select instructions
 *			and reinjects the #GP for all other cases.
 */
#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_ALLOW_RETRY	    (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED	    (1 << 4)
#define EMULTYPE_VMWARE_GP	    (1 << 5)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len);
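
/*
 * Illustrative sketch, not upstream code: a completion callback that
 * finished userspace I/O typically re-enters the emulator without
 * re-decoding:
 *
 *	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
 */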

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);

struct x86_emulate_ctxt;

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}

#define KVM_MMU_ROOT_CURRENT		BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
#define KVM_MMU_ROOTS_ALL		(~0UL)
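
/*
 * Illustrative sketch, not upstream code: callers combine these bits to
 * select which cached roots to free, e.g.
 *
 *	kvm_mmu_free_roots(vcpu, mmu, KVM_MMU_ROOT_CURRENT |
 *				      KVM_MMU_ROOT_PREVIOUS(0));
 *
 * with KVM_MMU_ROOTS_ALL dropping the current and all previous roots.
 */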

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK		(1 << 6)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

asmlinkage void kvm_spurious_fault(void);

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Usually after catching the fault we just panic; during reboot
 * instead the instruction is ignored.
 */
#define __kvm_handle_fault_on_reboot(insn)				\
	"666: \n\t"							\
	insn "\n\t"							\
	"jmp	668f \n\t"						\
	"667: \n\t"							\
	"call	kvm_spurious_fault \n\t"				\
	"668: \n\t"							\
	_ASM_EXTABLE(666b, 667b)
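
/*
 * Illustrative sketch, not upstream code: the macro wraps a single
 * virtualization instruction inside inline asm, e.g.
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff"));
 *
 * so that a fault caused by a mid-reboot VMXOFF lands in
 * kvm_spurious_fault() instead of an oops.
 */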

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);

static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
	/* We can only post Fixed and LowPrio IRQs */
	return (irq->delivery_mode == dest_Fixed ||
		irq->delivery_mode == dest_LowestPrio);
}

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_blocking)
		kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_unblocking)
		kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

#define put_smstate(type, buf, offset, val)                      \
	*(type *)((buf) + (offset) - 0x7e00) = val

#define GET_SMSTATE(type, buf, offset)		\
	(*(type *)((buf) + (offset) - 0x7e00))
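
/*
 * Illustrative sketch, not upstream code (the 0x7f00 offset below is
 * hypothetical): both helpers address SMRAM state-save fields relative to
 * the 0x7e00 base of the 64-bit layout, e.g.
 *
 *	put_smstate(u64, buf, 0x7f00, val);
 *	val = GET_SMSTATE(u64, buf, 0x7f00);
 */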

#endif /* _ASM_X86_KVM_HOST_H */