/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_CR3		KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
#define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)
#define KVM_REQ_APICV_UPDATE \
	KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
enum {
	PT_PAGE_TABLE_LEVEL   = 1,
	PT_DIRECTORY_LEVEL    = 2,
	PT_PDPE_LEVEL         = 3,
	/* set max level to the biggest one */
	PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
};
#define KVM_NR_PAGE_SIZES	(PT_MAX_HUGEPAGE_LEVEL - \
				 PT_PAGE_TABLE_LEVEL + 1)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
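
/*
 * Worked example (x86, PAGE_SHIFT == 12): for PT_DIRECTORY_LEVEL (2),
 * KVM_HPAGE_GFN_SHIFT(2) == 9, so KVM_HPAGE_SIZE(2) == 1UL << 21 == 2MiB
 * and KVM_PAGES_PER_HPAGE(2) == 512; for PT_PDPE_LEVEL (3) the same math
 * yields 1GiB and 262144 pages.  gfn_to_index() then returns the offset,
 * in level-sized strides, of @gfn's region relative to @base_gfn's.
 */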

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = __VCPU_REGS_RAX,
	VCPU_REGS_RCX = __VCPU_REGS_RCX,
	VCPU_REGS_RDX = __VCPU_REGS_RDX,
	VCPU_REGS_RBX = __VCPU_REGS_RBX,
	VCPU_REGS_RSP = __VCPU_REGS_RSP,
	VCPU_REGS_RBP = __VCPU_REGS_RBP,
	VCPU_REGS_RSI = __VCPU_REGS_RSI,
	VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8  = __VCPU_REGS_R8,
	VCPU_REGS_R9  = __VCPU_REGS_R9,
	VCPU_REGS_R10 = __VCPU_REGS_R10,
	VCPU_REGS_R11 = __VCPU_REGS_R11,
	VCPU_REGS_R12 = __VCPU_REGS_R12,
	VCPU_REGS_R13 = __VCPU_REGS_R13,
	VCPU_REGS_R14 = __VCPU_REGS_R14,
	VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS,

	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

enum exit_fastpath_completion {
	EXIT_FASTPATH_NONE,
	EXIT_FASTPATH_SKIP_EMUL_INS,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_BT		(1 << 15)
#define DR6_RTM		(1 << 16)
#define DR6_FIXED_1	0xfffe0ff0
#define DR6_INIT	0xffff0ff0
#define DR6_VOLATILE	0x0001e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff
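
/*
 * Illustrative sketch (mirrors what kvm_set_dr() does): a guest write to
 * DR6/DR7 keeps only the architecturally volatile bits and forces the
 * fixed bits to 1:
 *
 *	vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
 *	vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
 */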
#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)
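
/*
 * Illustrative sketch: a write fault taken while hardware walks the L2
 * guest's page tables sets all three bits above, so a handler can test
 *
 *	if ((error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE)
 *
 * to detect a fault on a nested guest page table.
 */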

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

/*
 * The pages used as guest page tables on the soft MMU are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
 * by an indirect shadow page can not be more than 15 bits.
 *
 * Currently, we use 14 bits, which are @level, @gpte_is_8_bytes, @quadrant,
 * @access, @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;
		unsigned gpte_is_8_bytes:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned :6;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};
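
/*
 * Worked example: the fields above occupy the low 24 bits of @word, so
 * with smm in the top byte kvm_memslots_for_spte_role() can recover it
 * with a single shift:
 *
 *	unsigned smm = role.word >> 24;
 */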

/*
 * This structure complements kvm_mmu_page_role caching everything needed for
 * MMU configuration. If nothing in both these structures changed, MMU
 * re-configuration can be skipped. @valid bit is set on first usage so we don't
 * treat all-zero structure as valid data.
 */
union kvm_mmu_extended_role {
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr0_pg:1;
		unsigned int cr4_pae:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_la57:1;
		unsigned int maxphyaddr:6;
	};
};

union kvm_mmu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
		union kvm_mmu_extended_role ext;
	};
};
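
/*
 * Illustrative sketch (helper name is hypothetical): MMU reconfiguration
 * can be skipped when the packed role is unchanged:
 *
 *	union kvm_mmu_role new_role = calc_mmu_role(vcpu);	// assumed helper
 *	if (new_role.as_u64 == context->mmu_role.as_u64)
 *		return;		// nothing changed, keep the current MMU
 */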

struct kvm_rmap_head {
	unsigned long val;
};

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page.  */
	atomic_t write_flooding_count;
};

struct kvm_pio_request {
	unsigned long linear_rip;
	unsigned long count;
	int in;
	int port;
	int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
	u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
	gpa_t cr3;
	hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
	((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
			    u32 access, struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	gpa_t root_cr3;
	union kvm_mmu_role mmu_role;
	u8 root_level;
	u8 shadow_root_level;
	u8 ept_ad;
	bool direct_map;
	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];
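
	/*
	 * Illustrative sketch, simplified from permission_fault() in mmu.h
	 * (the real code also folds in an SMAP adjustment):
	 *
	 *	int index = pfec >> 1;
	 *	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	 */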

	/*
	 * The pkru_mask indicates if protection key checks are needed.  It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */
	u32 pkru_mask;
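
	/*
	 * Illustrative sketch of the PKRU check (simplified; the real code
	 * computes the domain offset from the error code and pte access):
	 *
	 *	u32 pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
	 *	pkru_bits &= mmu->pkru_mask >> (index * 2);
	 *
	 * A non-zero result means the protection key forbids the access.
	 */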

	u64 *pae_root;
	u64 *lm_root;

	/*
	 * check zero bits on shadow page table entries, these
	 * bits include not only hardware reserved bits but also
	 * the bits that sptes never use.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	/* Can have large pages at levels 2..last_nonleaf_level-1. */
	u8 last_nonleaf_level;

	bool nx;

	u64 pdptrs[4]; /* pae */
};

struct kvm_tlb_range {
	u64 start_gfn;
	u64 pages;
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
	/*
	 * eventsel value for general purpose counters,
	 * ctrl value for fixed counters.
	 */
	u64 current_config;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 global_ovf_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);

	/*
	 * The gate to release perf_events not marked in
	 * pmc_in_use only once in a vcpu time slice.
	 */
	bool need_cleanup;

	/*
	 * The total number of programmed perf_events; it helps to avoid
	 * redundant checks before cleanup if the guest doesn't use vPMU at all.
	 */
	u8 event_count;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
	KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	union hv_stimer_config config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC)*/
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
	bool dont_zero_synic_pages;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	u32 vp_index;
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	cpumask_t tlb_flush;
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool apicv_active;
	bool load_eoi_exitmap_pending;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	u64 smi_count;
	bool tpr_access_reporting;
	bool xsaves_enabled;
	u64 ia32_xss;
	u64 microcode_version;
	u64 arch_capabilities;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest. This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu *mmu;

	/* Non-nested MMU for L1 */
	struct kvm_mmu root_mmu;
	/* L1 MMU when running nested */
	struct kvm_mmu guest_mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	/*
	 * QEMU userspace and the guest each have their own FPU state.
	 * In vcpu_run, we switch between the user and guest FPU contexts.
	 * While running a VCPU, the VCPU thread will have the guest FPU
	 * context.
	 *
	 * Note that while the PKRU state lives inside the fpu registers,
	 * it is switched out separately at VMENTER and VMEXIT time. The
	 * "guest_fpu" state here contains the guest FPU context, with the
	 * host PKRU bits.
	 */
	struct fpu *user_fpu;
	struct fpu *guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool injected;
		bool has_error_code;
		u8 nr;
		u32 error_code;
		unsigned long payload;
		bool has_payload;
		u8 nested_apf;
	} exception;

	struct kvm_queued_interrupt {
		bool injected;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	int maxphyaddr;

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u8 preempted;
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_pfn_cache cache;
	} st;

	u64 tsc_offset;
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 msr_ia32_power_ctl;
	u64 tsc_scaling_ratio;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */
	bool smi_pending;    /* SMI queued after currently running handler */

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned mmio_access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	struct kvm_vcpu_hv hyperv;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
		u32 host_apf_reason;
		unsigned long nested_apf_token;
		bool delivery_as_pf_vmexit;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	u64 msr_kvm_poll_control;

	/*
	 * Indicates whether the access faulted on its guest page table,
	 * which is set when fixing a page fault and is used to detect
	 * unhandleable instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

	/* GPA available */
	bool gpa_available;
	gpa_t gpa_val;

	/* the vCPU was preempted while in kernel mode (CPL 0) */
	bool preempted_in_kernel;

	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
	bool l1tf_flush_l1d;

	/* AMD MSRC001_0015 Hardware Configuration */
	u64 msr_hwcr;
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16
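
/*
 * Worked example: in xAPIC flat mode the LDR allocates 8 bits, one per
 * vCPU, matching xapic_flat_map[8] below; cluster mode allocates 4 bits
 * for the CPU within a cluster, matching xapic_cluster_map[16][4].
 */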

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};

/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	HV_REFERENCE_TSC_PAGE tsc_ref;

	struct idr conn_to_evt;

	u64 hv_reenlightenment_control;
	u64 hv_tsc_emulation_control;
	u64 hv_tsc_emulation_status;

	/* How many vCPUs have VP index != vCPU index */
	atomic_t num_mismatched_vp_indexes;

	struct hv_partition_assist_pg *hv_pa_pg;
};

enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};

#define APICV_INHIBIT_REASON_DISABLE    0
#define APICV_INHIBIT_REASON_HYPERV     1
#define APICV_INHIBIT_REASON_NESTED     2
#define APICV_INHIBIT_REASON_IRQWIN     3
#define APICV_INHIBIT_REASON_PIT_REINJ  4

struct kvm_arch {
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	unsigned long n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	u8 mmu_valid_gen;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct list_head lpage_disallowed_mmu_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	bool apic_access_page_done;
	unsigned long apicv_inhibit_reasons;

	gpa_t wall_clock;

	bool mwait_in_guest;
	bool hlt_in_guest;
	bool pause_in_guest;
	bool cstate_in_guest;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;
	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;

	#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
	#endif

	bool backwards_tsc_observed;
	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;

	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
	bool exception_payload_enabled;

	struct kvm_pmu_event_filter *pmu_event_filter;
	struct task_struct *nx_lpage_recovery_thread;
};

struct kvm_vm_stat {
	ulong mmu_shadow_zapped;
	ulong mmu_pte_write;
	ulong mmu_pte_updated;
	ulong mmu_pde_zapped;
	ulong mmu_flooded;
	ulong mmu_recycled;
	ulong mmu_cache_miss;
	ulong mmu_unsync;
	ulong remote_tlb_flush;
	ulong lpages;
	ulong nx_lpage_splits;
	ulong max_mmu_page_hash_collisions;
};

struct kvm_vcpu_stat {
	u64 pf_fixed;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
P
	u64 l1d_flush;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
1017
	u64 req_event;
1018
};
1019

1020 1021
struct x86_instruction_info;

1022 1023 1024 1025 1026 1027
struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
{
	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
}

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_processor_compatibility)(void);/* __init */
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*has_emulated_msr)(int index);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	struct kvm *(*vm_alloc)(void);
	void (*vm_free)(struct kvm *);
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
	int  (*tlb_remote_flush)(struct kvm *kvm);
	int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
			struct kvm_tlb_range *range);
	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */
	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu,
		enum exit_fastpath_completion exit_fastpath);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*check_apicv_inhibit_reasons)(ulong bit);
	void (*pre_update_apicv_exec_ctrl)(struct kvm *kvm, bool activate);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
	/* Returns actual tsc_offset set in active VMCS */
	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
		enum exit_fastpath_completion *exit_fastpath);
	bool (*mpx_supported)(void);
	bool (*xsaves_supported)(void);
	bool (*umip_emulated)(void);
	bool (*pt_supported)(void);
	bool (*pku_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

	/*
	 * Arch-specific dirty logging hooks. These hooks are only supposed to
	 * be valid if the specific arch has hardware-accelerated dirty logging
	 * mechanism. Currently only for PML on VMX.
	 *
	 *  - slot_enable_log_dirty:
	 *	called when enabling log dirty mode for the slot.
	 *  - slot_disable_log_dirty:
	 *	called when disabling log dirty mode for the slot.
	 *	also called when slot is created with log dirty disabled.
	 *  - flush_log_dirty:
	 *	called before reporting dirty_bitmap to userspace.
	 *  - enable_log_dirty_pt_masked:
	 *	called when reenabling log dirty for the GFNs in the mask after
	 *	corresponding bits are cleared in slot->dirty_bitmap.
	 */
	void (*slot_enable_log_dirty)(struct kvm *kvm,
				      struct kvm_memory_slot *slot);
	void (*slot_disable_log_dirty)(struct kvm *kvm,
				       struct kvm_memory_slot *slot);
	void (*flush_log_dirty)(struct kvm *kvm);
	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu);

	/* pmu operations of sub-arch */
	const struct kvm_pmu_ops *pmu_ops;
	/*
	 * Architecture specific hooks for vCPU blocking due to
	 * HLT instruction.
	 * Returns for .pre_block():
	 *    - 0 means continue to block the vCPU.
	 *    - 1 means we cannot block the vCPU since some event
	 *        happens during this period, such as, 'ON' bit in
	 *        posted-interrupts descriptor is set.
	 */
	int (*pre_block)(struct kvm_vcpu *vcpu);
	void (*post_block)(struct kvm_vcpu *vcpu);

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);
	int (*get_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				unsigned user_data_size);
	int (*set_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state);
	bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);

	int (*smi_allowed)(struct kvm_vcpu *vcpu);
	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
	int (*enable_smi_window)(struct kvm_vcpu *vcpu);

	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);

	int (*get_msr_feature)(struct kvm_msr_entry *entry);

	int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
				   uint16_t *vmcs_version);
	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);

	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);

	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;
extern struct kmem_cache *x86_fpu_cache;

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kvm_x86_ops->vm_alloc();
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	return kvm_x86_ops->vm_free(kvm);
}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	if (kvm_x86_ops->tlb_remote_flush &&
	    !kvm_x86_ops->tlb_remote_flush(kvm))
		return 0;
	else
		return -ENOTSUPP;
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			  const void *val, int bytes);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8   kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64  kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64  kvm_default_tsc_scaling_ratio;

extern u64 kvm_mce_cap_supported;

/*
 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
 *			userspace I/O) to indicate that the emulation context
 *			should be reused as is, i.e. skip initialization of
 *			emulation context, instruction fetch and decode.
 *
 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
 *		      Indicates that only select instructions (tagged with
 *		      EmulateOnUD) should be emulated (to minimize the emulator
 *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
 *
 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
 *		   decode the instruction length.  For use *only* by
 *		   kvm_x86_ops->skip_emulated_instruction() implementations.
 *
 * EMULTYPE_ALLOW_RETRY - Set when the emulator should resume the guest to
 *			  retry native execution under certain conditions.
 *
 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
 *			     triggered by KVM's magic "force emulation" prefix,
 *			     which is opt in via module param (off by default).
 *			     Bypasses EmulateOnUD restriction despite emulating
 *			     due to an intercepted #UD (see EMULTYPE_TRAP_UD).
 *			     Used to test the full emulator from userspace.
 *
 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
 *			backdoor emulation, which is opt in via module param.
 *			VMware backdoor emulation handles select instructions
 *			and reinjects the #GP for all other cases.
 */
#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_ALLOW_RETRY	    (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED	    (1 << 4)
#define EMULTYPE_VMWARE_GP	    (1 << 5)
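
/*
 * Illustrative sketch: an intercepted #GP handler that opts in to VMware
 * backdoor handling simply forwards to the emulator with the right flag:
 *
 *	return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
 */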
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len);

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);

struct x86_emulate_ctxt;

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}

#define KVM_MMU_ROOT_CURRENT		BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
#define KVM_MMU_ROOTS_ALL		(~0UL)
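
/*
 * Illustrative sketch: @roots_to_free for kvm_mmu_free_roots() below is a
 * bitmask built from these macros, e.g. to drop the active root plus the
 * first cached previous root:
 *
 *	kvm_mmu_free_roots(vcpu, mmu,
 *			   KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(0));
 */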

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);
bool kvm_apicv_activated(struct kvm *kvm);
void kvm_apicv_init(struct kvm *kvm, bool enable);
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
void kvm_request_apicv_update(struct kvm *kvm, bool activate,
			      unsigned long bit);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
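
/*
 * Worked example: TSS_IOPB_SIZE is 65536/8 == 8192 bytes and
 * TSS_REDIRECTION_SIZE is 256/8 == 32 bytes, so RMODE_TSS_SIZE is
 * 0x68 + 32 + 8192 + 1 == 8329 bytes; the extra byte is the mandatory
 * 0xff terminator of the I/O permission bitmap.
 */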
enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK		(1 << 6)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

asmlinkage void kvm_spurious_fault(void);

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Usually after catching the fault we just panic; during reboot
 * instead the instruction is ignored.
 */
#define __kvm_handle_fault_on_reboot(insn)				\
	"666: \n\t"							\
	insn "\n\t"							\
	"jmp	668f \n\t"						\
	"667: \n\t"							\
	"call	kvm_spurious_fault \n\t"				\
	"668: \n\t"							\
	_ASM_EXTABLE(666b, 667b)
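
/*
 * Illustrative usage (assumed caller, not defined in this header):
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff"));
 *
 * If the instruction faults because a reboot already turned off
 * virtualization, the exception table entry transfers control to
 * kvm_spurious_fault().
 */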
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap);
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);

static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
	/* We can only post Fixed and LowPrio IRQs */
	return (irq->delivery_mode == dest_Fixed ||
		irq->delivery_mode == dest_LowestPrio);
}

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_blocking)
		kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_unblocking)
		kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

#define put_smstate(type, buf, offset, val)                      \
	*(type *)((buf) + (offset) - 0x7e00) = val

#define GET_SMSTATE(type, buf, offset)		\
	(*(type *)((buf) + (offset) - 0x7e00))
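
/*
 * Worked example: the SMM state-save area occupies SMRAM offsets
 * 0x7e00..0x7fff and @buf holds exactly that window, so
 * GET_SMSTATE(u64, buf, 0x7f78) reads 8 bytes at buf + 0x178.
 */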

#endif /* _ASM_X86_KVM_HOST_H */