/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_CR3		KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
#define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
enum {
	PT_PAGE_TABLE_LEVEL   = 1,
	PT_DIRECTORY_LEVEL    = 2,
	PT_PDPE_LEVEL         = 3,
	/* set max level to the biggest one */
	PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
};
#define KVM_NR_PAGE_SIZES	(PT_MAX_HUGEPAGE_LEVEL - \
				 PT_PAGE_TABLE_LEVEL + 1)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
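/*
 * Worked example, assuming the x86 PAGE_SHIFT of 12: level 1 maps 4KiB
 * (shift 12), level 2 maps 2MiB (shift 21, i.e. 512 base pages) and
 * level 3 maps 1GiB (shift 30, i.e. 262144 base pages).
 */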

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
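/*
 * Example: with base_gfn == 0x400 and level == PT_DIRECTORY_LEVEL,
 * gfn 0x634 yields (0x634 >> 9) - (0x400 >> 9) == 3 - 2 == 1.
 */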

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = __VCPU_REGS_RAX,
	VCPU_REGS_RCX = __VCPU_REGS_RCX,
	VCPU_REGS_RDX = __VCPU_REGS_RDX,
	VCPU_REGS_RBX = __VCPU_REGS_RBX,
	VCPU_REGS_RSP = __VCPU_REGS_RSP,
	VCPU_REGS_RBP = __VCPU_REGS_RBP,
	VCPU_REGS_RSI = __VCPU_REGS_RSI,
	VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8  = __VCPU_REGS_R8,
	VCPU_REGS_R9  = __VCPU_REGS_R9,
	VCPU_REGS_R10 = __VCPU_REGS_R10,
	VCPU_REGS_R11 = __VCPU_REGS_R11,
	VCPU_REGS_R12 = __VCPU_REGS_R12,
	VCPU_REGS_R13 = __VCPU_REGS_R13,
	VCPU_REGS_R14 = __VCPU_REGS_R14,
	VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS,

	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_BT		(1 << 15)
#define DR6_RTM		(1 << 16)
#define DR6_FIXED_1	0xfffe0ff0
#define DR6_INIT	0xffff0ff0
#define DR6_VOLATILE	0x0001e00f
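/*
 * DR6_INIT differs from DR6_FIXED_1 only in bit 16 (DR6.RTM), whose
 * polarity is inverted: it reads as 1 when the CPU is not in an RTM
 * transaction.
 */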

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)
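/*
 * For reference, PFERR_NESTED_GUEST_PAGE expands to 0x200000003ULL: the
 * guest-page bit (33) plus the write and present bits.
 */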

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
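/*
 * Consumption sketch (the real helpers live in mmu.c): once the cache has
 * been topped up outside the fault path, an allocation simply pops a
 * preallocated object, roughly:
 *
 *	void *obj = cache->objects[--cache->nobjs];
 */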

/*
 * the pages used as a guest page table on the soft MMU are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
 * by an indirect shadow page cannot exceed 15 bits.
 *
 * Currently, we use 14 bits: @level, @gpte_is_8_bytes, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;
		unsigned gpte_is_8_bytes:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned :6;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};
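/*
 * The fields above occupy the low 24 bits (18 used plus 6 reserved), so
 * smm is exactly the top byte; e.g. (word >> 24) recovers it with the
 * single shift mentioned in the comment.
 */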

/*
 * This structure complements kvm_mmu_page_role, caching everything needed
 * for MMU configuration. If nothing in both these structures changed, MMU
 * re-configuration can be skipped. The @valid bit is set on first usage so
 * we don't treat an all-zero structure as valid data.
 */
union kvm_mmu_extended_role {
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr0_pg:1;
		unsigned int cr4_pae:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_la57:1;
		unsigned int maxphyaddr:6;
	};
};

union kvm_mmu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
		union kvm_mmu_extended_role ext;
	};
};

struct kvm_rmap_head {
	unsigned long val;
};
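/*
 * The low bit of @val discriminates the encoding used by the pte_list_*
 * helpers in mmu.c: clear means @val is itself the lone spte pointer, set
 * means (val & ~1) points to a struct pte_list_desc chaining several.
 */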

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page.  */
	atomic_t write_flooding_count;
};

struct kvm_pio_request {
	unsigned long linear_rip;
	unsigned long count;
	int in;
	int port;
	int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
	u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
	gpa_t cr3;
	hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
	((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	gpa_t root_cr3;
	union kvm_mmu_role mmu_role;
	u8 root_level;
	u8 shadow_root_level;
	u8 ept_ad;
	bool direct_map;
	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];
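	/*
	 * Example: a user-mode write fault has error code PFERR_USER_MASK |
	 * PFERR_WRITE_MASK == 0x6, so byte (0x6 >> 1) == 3 is consulted.
	 */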

	/*
	* The pkru_mask indicates if protection key checks are needed.  It
	* consists of 16 domains indexed by page fault error code bits [4:1],
	* with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	* Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	*/
	u32 pkru_mask;

	u64 *pae_root;
	u64 *lm_root;

	/*
	 * Check zero bits on shadow page table entries; these
	 * bits include not only hardware-reserved bits but also
	 * bits that the spte never uses.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	/* Can have large pages at levels 2..last_nonleaf_level-1. */
	u8 last_nonleaf_level;

	bool nx;

	u64 pdptrs[4]; /* pae */
};

struct kvm_tlb_range {
	u64 start_gfn;
	u64 pages;
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
	/*
	 * eventsel value for general purpose counters,
	 * ctrl value for fixed counters.
	 */
	u64 current_config;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 global_ovf_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);

	/*
	 * Gate ensuring that perf_events not marked in pmc_in_use
	 * are released at most once per vCPU time slice.
	 */
	bool need_cleanup;

	/*
	 * The total number of programmed perf_events; it helps avoid a
	 * redundant check before cleanup if the guest doesn't use the vPMU at all.
	 */
	u8 event_count;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
	KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	union hv_stimer_config config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC)*/
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
	bool dont_zero_synic_pages;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	u32 vp_index;
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	cpumask_t tlb_flush;
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool apicv_active;
	bool load_eoi_exitmap_pending;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	u64 smi_count;
	bool tpr_access_reporting;
	bool xsaves_enabled;
	u64 ia32_xss;
	u64 microcode_version;
	u64 arch_capabilities;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest. This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu *mmu;

	/* Non-nested MMU for L1 */
	struct kvm_mmu root_mmu;

	/* L1 MMU when running nested */
	struct kvm_mmu guest_mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	/*
	 * QEMU userspace and the guest each have their own FPU state.
	 * In vcpu_run, we switch between the user and guest FPU contexts.
	 * While running a VCPU, the VCPU thread will have the guest FPU
	 * context.
	 *
	 * Note that while the PKRU state lives inside the fpu registers,
	 * it is switched out separately at VMENTER and VMEXIT time. The
	 * "guest_fpu" state here contains the guest FPU context, with the
	 * host PKRU bits.
	 */
	struct fpu *user_fpu;
	struct fpu *guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool injected;
		bool has_error_code;
		u8 nr;
		u32 error_code;
		unsigned long payload;
		bool has_payload;
		u8 nested_apf;
	} exception;

	struct kvm_queued_interrupt {
		bool injected;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	int maxphyaddr;

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 tsc_offset;
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 msr_ia32_power_ctl;
	u64 tsc_scaling_ratio;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */
	bool smi_pending;    /* SMI queued after currently running handler */

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned mmio_access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	struct kvm_vcpu_hv hyperv;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
		u32 host_apf_reason;
		unsigned long nested_apf_token;
		bool delivery_as_pf_vmexit;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	u64 msr_kvm_poll_control;

	/*
	 * Indicates whether the access faulted on its page table in the
	 * guest; set while fixing a page fault and used to detect
	 * unhandleable instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

	/* GPA available */
	bool gpa_available;
	gpa_t gpa_val;

	/* be preempted when it's in kernel-mode(cpl=0) */
	bool preempted_in_kernel;

	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
	bool l1tf_flush_l1d;

	/* AMD MSRC001_0015 Hardware Configuration */
	u64 msr_hwcr;
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16
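/*
 * E.g. xAPIC flat mode dedicates 8 LDR bits (one per logical CPU), hence
 * mode 8; if some vCPUs use flat and others cluster addressing, the
 * accumulated mode (8 | 4 == 12) matches no single constant and the
 * map-based fast path is disabled.
 */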

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};

/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	HV_REFERENCE_TSC_PAGE tsc_ref;

	struct idr conn_to_evt;

	u64 hv_reenlightenment_control;
	u64 hv_tsc_emulation_control;
	u64 hv_tsc_emulation_status;

	/* How many vCPUs have VP index != vCPU index */
	atomic_t num_mismatched_vp_indexes;

	struct hv_partition_assist_pg *hv_pa_pg;
};

enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};

struct kvm_arch {
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	unsigned long n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	u8 mmu_valid_gen;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct list_head lpage_disallowed_mmu_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	bool apic_access_page_done;

	gpa_t wall_clock;

	bool mwait_in_guest;
	bool hlt_in_guest;
	bool pause_in_guest;
	bool cstate_in_guest;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;

	#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
	#endif

	bool backwards_tsc_observed;
	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;

	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
	bool exception_payload_enabled;

	struct kvm_pmu_event_filter *pmu_event_filter;
	struct task_struct *nx_lpage_recovery_thread;
};

struct kvm_vm_stat {
	ulong mmu_shadow_zapped;
	ulong mmu_pte_write;
	ulong mmu_pte_updated;
	ulong mmu_pde_zapped;
	ulong mmu_flooded;
	ulong mmu_recycled;
	ulong mmu_cache_miss;
	ulong mmu_unsync;
	ulong remote_tlb_flush;
	ulong lpages;
	ulong nx_lpage_splits;
	ulong max_mmu_page_hash_collisions;
};

struct kvm_vcpu_stat {
	u64 pf_fixed;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 l1d_flush;
	u64 halt_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
	u64 req_event;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
{
	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
}

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_processor_compatibility)(void);/* __init */
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*has_emulated_msr)(int index);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	struct kvm *(*vm_alloc)(void);
	void (*vm_free)(struct kvm *);
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
	int  (*tlb_remote_flush)(struct kvm *kvm);
	int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
			struct kvm_tlb_range *range);

	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */
	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*get_enable_apicv)(struct kvm *kvm);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
	/* Returns actual tsc_offset set in active VMCS */
	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
	bool (*mpx_supported)(void);
	bool (*xsaves_supported)(void);
	bool (*umip_emulated)(void);
	bool (*pt_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

	/*
	 * Arch-specific dirty logging hooks. These hooks are only supposed to
	 * be valid if the specific arch has a hardware-accelerated dirty logging
	 * mechanism. Currently only for PML on VMX.
	 *
	 *  - slot_enable_log_dirty:
	 *	called when enabling log dirty mode for the slot.
	 *  - slot_disable_log_dirty:
	 *	called when disabling log dirty mode for the slot.
	 *	also called when slot is created with log dirty disabled.
	 *  - flush_log_dirty:
	 *	called before reporting dirty_bitmap to userspace.
	 *  - enable_log_dirty_pt_masked:
	 *	called when reenabling log dirty for the GFNs in the mask after
	 *	corresponding bits are cleared in slot->dirty_bitmap.
	 */
	void (*slot_enable_log_dirty)(struct kvm *kvm,
				      struct kvm_memory_slot *slot);
	void (*slot_disable_log_dirty)(struct kvm *kvm,
				       struct kvm_memory_slot *slot);
	void (*flush_log_dirty)(struct kvm *kvm);
	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu);

	/* pmu operations of sub-arch */
	const struct kvm_pmu_ops *pmu_ops;

	/*
	 * Architecture specific hooks for vCPU blocking due to
	 * HLT instruction.
	 * Returns for .pre_block():
	 *    - 0 means continue to block the vCPU.
	 *    - 1 means we cannot block the vCPU since some event
	 *        happens during this period, such as, 'ON' bit in
	 *        posted-interrupts descriptor is set.
	 */
	int (*pre_block)(struct kvm_vcpu *vcpu);
	void (*post_block)(struct kvm_vcpu *vcpu);

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);

	int (*get_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				unsigned user_data_size);
	int (*set_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state);
	bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);

	int (*smi_allowed)(struct kvm_vcpu *vcpu);
	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
	int (*enable_smi_window)(struct kvm_vcpu *vcpu);

	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);

	int (*get_msr_feature)(struct kvm_msr_entry *entry);

	int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
				   uint16_t *vmcs_version);
	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);

	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);

	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;
extern struct kmem_cache *x86_fpu_cache;

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kvm_x86_ops->vm_alloc();
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	return kvm_x86_ops->vm_free(kvm);
}
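/*
 * vm_alloc()/vm_free() let the vendor module allocate its own containing
 * structure (e.g. on VMX a struct kvm_vmx embedding struct kvm as its
 * first member), so the returned pointer doubles as the struct kvm itself.
 */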

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	if (kvm_x86_ops->tlb_remote_flush &&
	    !kvm_x86_ops->tlb_remote_flush(kvm))
		return 0;
	else
		return -ENOTSUPP;
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			  const void *val, int bytes);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8   kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64  kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64  kvm_default_tsc_scaling_ratio;

extern u64 kvm_mce_cap_supported;

/*
 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
 *			userspace I/O) to indicate that the emulation context
 *			should be reused as is, i.e. skip initialization of
 *			emulation context, instruction fetch and decode.
 *
 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
 *		      Indicates that only select instructions (tagged with
 *		      EmulateOnUD) should be emulated (to minimize the emulator
 *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
 *
 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
 *		   decode the instruction length.  For use *only* by
 *		   kvm_x86_ops->skip_emulated_instruction() implementations.
 *
 * EMULTYPE_ALLOW_RETRY - Set when the emulator should resume the guest to
 *			  retry native execution under certain conditions.
 *
 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
 *			     triggered by KVM's magic "force emulation" prefix,
 *			     which is opt in via module param (off by default).
 *			     Bypasses EmulateOnUD restriction despite emulating
 *			     due to an intercepted #UD (see EMULTYPE_TRAP_UD).
 *			     Used to test the full emulator from userspace.
 *
 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
 *			backdoor emulation, which is opt in via module param.
 *			VMware backdoor emulation handles select instructions
 *			and reinjects the #GP for all other cases.
 */
#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_ALLOW_RETRY	    (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED	    (1 << 4)
#define EMULTYPE_VMWARE_GP	    (1 << 5)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len);
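/*
 * Typical call, e.g. on the #UD intercept path (sketch):
 *
 *	return kvm_emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
 */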

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);

struct x86_emulate_ctxt;

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}
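/*
 * Example: if sources 0 and 2 both assert the line, *irq_state is 0b101
 * and the function keeps returning 1 until both sources have cleared it.
 */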

#define KVM_MMU_ROOT_CURRENT		BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
#define KVM_MMU_ROOTS_ALL		(~0UL)
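/*
 * Example: kvm_mmu_free_roots(vcpu, mmu, KVM_MMU_ROOT_CURRENT) drops only
 * the active root, while KVM_MMU_ROOTS_ALL additionally covers every
 * cached KVM_MMU_ROOT_PREVIOUS(i) root.
 */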

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
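/* I.e. 104 + 32 + 8192 + 1 == 8329 bytes for the real-mode TSS. */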

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK		(1 << 6)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

asmlinkage void kvm_spurious_fault(void);

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Usually after catching the fault we just panic; during reboot
 * instead the instruction is ignored.
 */
#define __kvm_handle_fault_on_reboot(insn)				\
	"666: \n\t"							\
	insn "\n\t"							\
	"jmp	668f \n\t"						\
	"667: \n\t"							\
	"call	kvm_spurious_fault \n\t"				\
	"668: \n\t"							\
	_ASM_EXTABLE(666b, 667b)
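/*
 * Hypothetical use wrapping a VMX instruction (illustrative only):
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff"));
 *
 * On a fault, the exception-table fixup jumps to label 667, calls
 * kvm_spurious_fault() and falls through to 668, so the faulting
 * instruction is effectively skipped.
 */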

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);

static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
	/* We can only post Fixed and LowPrio IRQs */
	return (irq->delivery_mode == dest_Fixed ||
		irq->delivery_mode == dest_LowestPrio);
}

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_blocking)
		kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_unblocking)
		kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

#define put_smstate(type, buf, offset, val)                      \
	*(type *)((buf) + (offset) - 0x7e00) = val

#define GET_SMSTATE(type, buf, offset)		\
	(*(type *)((buf) + (offset) - 0x7e00))
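/*
 * The offsets are those of the state-save area at the top of the 64KiB
 * SMRAM segment; e.g. GET_SMSTATE(u64, buf, 0x7ff8) reads from buf + 0x1f8.
 */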

#endif /* _ASM_X86_KVM_HOST_H */