/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/irq.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/hyperv-tlfs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_CR3		KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
#define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)



#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
enum {
	PT_PAGE_TABLE_LEVEL   = 1,
	PT_DIRECTORY_LEVEL    = 2,
	PT_PDPE_LEVEL         = 3,
	/* set max level to the biggest one */
	PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
};
#define KVM_NR_PAGE_SIZES	(PT_MAX_HUGEPAGE_LEVEL - \
				 PT_PAGE_TABLE_LEVEL + 1)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
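
/*
 * Worked example (illustrative): for a 2MB huge page, i.e.
 * level == PT_DIRECTORY_LEVEL == 2, with 4KB base pages (PAGE_SHIFT == 12):
 *
 *	KVM_HPAGE_GFN_SHIFT(2) == (2 - 1) * 9 == 9
 *	KVM_HPAGE_SHIFT(2)     == 12 + 9 == 21
 *	KVM_HPAGE_SIZE(2)      == 1UL << 21 == 2MB
 *	KVM_PAGES_PER_HPAGE(2) == 2MB / 4KB == 512
 */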

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
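
/*
 * Usage sketch (illustrative): with base_gfn == 0x400 and gfn == 0x412,
 * gfn_to_index() returns 0x12 at PT_PAGE_TABLE_LEVEL (shift 0) but 0 at
 * PT_DIRECTORY_LEVEL (both gfns shift down to 0x2): the two gfns lie in
 * different 4KB pages inside the same 2MB region.
 */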

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = __VCPU_REGS_RAX,
	VCPU_REGS_RCX = __VCPU_REGS_RCX,
	VCPU_REGS_RDX = __VCPU_REGS_RDX,
	VCPU_REGS_RBX = __VCPU_REGS_RBX,
	VCPU_REGS_RSP = __VCPU_REGS_RSP,
	VCPU_REGS_RBP = __VCPU_REGS_RBP,
	VCPU_REGS_RSI = __VCPU_REGS_RSI,
	VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8  = __VCPU_REGS_R8,
	VCPU_REGS_R9  = __VCPU_REGS_R9,
	VCPU_REGS_R10 = __VCPU_REGS_R10,
	VCPU_REGS_R11 = __VCPU_REGS_R11,
	VCPU_REGS_R12 = __VCPU_REGS_R12,
	VCPU_REGS_R13 = __VCPU_REGS_R13,
	VCPU_REGS_R14 = __VCPU_REGS_R14,
	VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_BT		(1 << 15)
#define DR6_RTM		(1 << 16)
#define DR6_FIXED_1	0xfffe0ff0
#define DR6_INIT	0xffff0ff0
#define DR6_VOLATILE	0x0001e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)
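
/*
 * Example (illustrative): a faulting guest write to a present user page
 * arrives with an error code of
 * PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK == 0x7.
 */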

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes by guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

/*
 * The pages used as guest page tables by the soft MMU are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
 * by an indirect shadow page cannot be more than 15 bits.
 *
 * Currently we use 14 bits: @level, @gpte_is_8_bytes, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;
		unsigned gpte_is_8_bytes:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned :6;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};

/*
 * This structure complements kvm_mmu_page_role caching everything needed for
 * MMU configuration. If nothing in both these structures changed, MMU
 * re-configuration can be skipped. @valid bit is set on first usage so we don't
 * treat all-zero structure as valid data.
 */
union kvm_mmu_extended_role {
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr0_pg:1;
		unsigned int cr4_pae:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_la57:1;
		unsigned int maxphyaddr:6;
	};
};

union kvm_mmu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
		union kvm_mmu_extended_role ext;
	};
};

struct kvm_rmap_head {
	unsigned long val;
};

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page.  */
	atomic_t write_flooding_count;
};

struct kvm_pio_request {
	unsigned long linear_rip;
	unsigned long count;
	int in;
	int port;
	int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
	u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
	gpa_t cr3;
	hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
	((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	gpa_t root_cr3;
	union kvm_mmu_role mmu_role;
	u8 root_level;
	u8 shadow_root_level;
	u8 ept_ad;
	bool direct_map;
	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];
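
	/*
	 * Example (illustrative): for a fault with PFEC.W|PFEC.U (error
	 * code 0x6), the byte index is (0x6 >> 1) == 3; the access is a
	 * permission fault iff the bit for the pte's ACC_* permissions
	 * is set in permissions[3].
	 */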

	/*
	 * The pkru_mask indicates if protection key checks are needed.  It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */
	u32 pkru_mask;

	u64 *pae_root;
	u64 *lm_root;

	/*
	 * Check zero bits in shadow page table entries; these bits
	 * include not only hardware-reserved bits but also bits that
	 * sptes never use.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	/* Can have large pages at levels 2..last_nonleaf_level-1. */
	u8 last_nonleaf_level;

	bool nx;

	u64 pdptrs[4]; /* pae */
};

struct kvm_tlb_range {
	u64 start_gfn;
	u64 pages;
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 global_ovf_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	u64 reprogram_pmi;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
	KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	union hv_stimer_config config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC)*/
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
	bool dont_zero_synic_pages;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	u32 vp_index;
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	cpumask_t tlb_flush;
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool apicv_active;
	bool load_eoi_exitmap_pending;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	u64 smi_count;
	bool tpr_access_reporting;
	u64 ia32_xss;
	u64 microcode_version;
	u64 arch_capabilities;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest. This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu *mmu;

	/* Non-nested MMU for L1 */
	struct kvm_mmu root_mmu;

	/* L1 MMU when running nested */
	struct kvm_mmu guest_mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	/*
	 * QEMU userspace and the guest each have their own FPU state.
	 * In vcpu_run, we switch between the user and guest FPU contexts.
	 * While running a VCPU, the VCPU thread will have the guest FPU
	 * context.
	 *
	 * Note that while the PKRU state lives inside the fpu registers,
	 * it is switched out separately at VMENTER and VMEXIT time. The
	 * "guest_fpu" state here contains the guest FPU context, with the
	 * host PKRU bits.
	 */
	struct fpu *user_fpu;
	struct fpu *guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool injected;
		bool has_error_code;
		u8 nr;
		u32 error_code;
		unsigned long payload;
		bool has_payload;
		u8 nested_apf;
	} exception;

	struct kvm_queued_interrupt {
		bool injected;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	int maxphyaddr;

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_hva_cache stime;
		struct kvm_steal_time steal;
	} st;

	u64 tsc_offset;
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 msr_ia32_power_ctl;
	u64 tsc_scaling_ratio;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */
	bool smi_pending;    /* SMI queued after currently running handler */

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned mmio_access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	struct kvm_vcpu_hv hyperv;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
		u32 host_apf_reason;
		unsigned long nested_apf_token;
		bool delivery_as_pf_vmexit;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	u64 msr_kvm_poll_control;

	/*
	 * Indicates whether the access faulted on its own page table in
	 * the guest; set when fixing a page fault and used to detect
	 * unhandleable instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

	/* GPA available */
	bool gpa_available;
	gpa_t gpa_val;

	/* be preempted when it's in kernel-mode(cpl=0) */
	bool preempted_in_kernel;

	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
	bool l1tf_flush_l1d;

	/* AMD MSRC001_0015 Hardware Configuration */
	u64 msr_hwcr;
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};

/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	HV_REFERENCE_TSC_PAGE tsc_ref;

	struct idr conn_to_evt;

	u64 hv_reenlightenment_control;
	u64 hv_tsc_emulation_control;
	u64 hv_tsc_emulation_status;

	/* How many vCPUs have VP index != vCPU index */
	atomic_t num_mismatched_vp_indexes;

	struct hv_partition_assist_pg *hv_pa_pg;
};

enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};

struct kvm_arch {
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	unsigned long n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	u8 mmu_valid_gen;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	bool apic_access_page_done;

	gpa_t wall_clock;

	bool mwait_in_guest;
	bool hlt_in_guest;
	bool pause_in_guest;
	bool cstate_in_guest;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;

	#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
	#endif

	bool backwards_tsc_observed;
	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;

	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
	bool exception_payload_enabled;

	struct kvm_pmu_event_filter *pmu_event_filter;
};

struct kvm_vm_stat {
	ulong mmu_shadow_zapped;
	ulong mmu_pte_write;
	ulong mmu_pte_updated;
	ulong mmu_pde_zapped;
	ulong mmu_flooded;
	ulong mmu_recycled;
	ulong mmu_cache_miss;
	ulong mmu_unsync;
	ulong remote_tlb_flush;
	ulong lpages;
	ulong nx_lpage_splits;
	ulong max_mmu_page_hash_collisions;
};

struct kvm_vcpu_stat {
	u64 pf_fixed;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 l1d_flush;
	u64 halt_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
	u64 req_event;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_processor_compatibility)(void);/* __init */
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*has_emulated_msr)(int index);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	struct kvm *(*vm_alloc)(void);
	void (*vm_free)(struct kvm *);
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
	int  (*tlb_remote_flush)(struct kvm *kvm);
	int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
			struct kvm_tlb_range *range);

	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */
	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	bool (*get_enable_apicv)(struct kvm_vcpu *vcpu);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
	/* Returns actual tsc_offset set in active VMCS */
	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
	bool (*mpx_supported)(void);
	bool (*xsaves_supported)(void);
	bool (*umip_emulated)(void);
	bool (*pt_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

	void (*sched_in)(struct kvm_vcpu *kvm, int cpu);

	/*
	 * Arch-specific dirty logging hooks. These hooks are only supposed to
	 * be valid if the specific arch has hardware-accelerated dirty logging
	 * mechanism. Currently only for PML on VMX.
	 *
	 *  - slot_enable_log_dirty:
	 *	called when enabling log dirty mode for the slot.
	 *  - slot_disable_log_dirty:
	 *	called when disabling log dirty mode for the slot.
	 *	also called when slot is created with log dirty disabled.
	 *  - flush_log_dirty:
	 *	called before reporting dirty_bitmap to userspace.
	 *  - enable_log_dirty_pt_masked:
	 *	called when reenabling log dirty for the GFNs in the mask after
	 *	corresponding bits are cleared in slot->dirty_bitmap.
	 */
	void (*slot_enable_log_dirty)(struct kvm *kvm,
				      struct kvm_memory_slot *slot);
	void (*slot_disable_log_dirty)(struct kvm *kvm,
				       struct kvm_memory_slot *slot);
	void (*flush_log_dirty)(struct kvm *kvm);
	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu);

	/* pmu operations of sub-arch */
	const struct kvm_pmu_ops *pmu_ops;

	/*
	 * Architecture specific hooks for vCPU blocking due to
	 * HLT instruction.
	 * Returns for .pre_block():
	 *    - 0 means continue to block the vCPU.
	 *    - 1 means we cannot block the vCPU since some event
	 *        happens during this period, such as, 'ON' bit in
	 *        posted-interrupts descriptor is set.
	 */
	int (*pre_block)(struct kvm_vcpu *vcpu);
	void (*post_block)(struct kvm_vcpu *vcpu);

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);

	int (*get_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				unsigned user_data_size);
	int (*set_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state);
	bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);

	int (*smi_allowed)(struct kvm_vcpu *vcpu);
	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
	int (*enable_smi_window)(struct kvm_vcpu *vcpu);

	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);

	int (*get_msr_feature)(struct kvm_msr_entry *entry);

	int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
				   uint16_t *vmcs_version);
	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);

	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);

	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;
extern struct kmem_cache *x86_fpu_cache;

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kvm_x86_ops->vm_alloc();
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	return kvm_x86_ops->vm_free(kvm);
}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	if (kvm_x86_ops->tlb_remote_flush &&
	    !kvm_x86_ops->tlb_remote_flush(kvm))
		return 0;
	else
		return -ENOTSUPP;
}

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			  const void *val, int bytes);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8   kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64  kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64  kvm_default_tsc_scaling_ratio;

extern u64 kvm_mce_cap_supported;

/*
 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
 *			userspace I/O) to indicate that the emulation context
 *			should be reused as is, i.e. skip initialization of
 *			emulation context, instruction fetch and decode.
 *
 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
 *		      Indicates that only select instructions (tagged with
 *		      EmulateOnUD) should be emulated (to minimize the emulator
 *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
 *
 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
 *		   decode the instruction length.  For use *only* by
 *		   kvm_x86_ops->skip_emulated_instruction() implementations.
 *
 * EMULTYPE_ALLOW_RETRY - Set when the emulator should resume the guest to
 *			  retry native execution under certain conditions.
 *
 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
 *			     triggered by KVM's magic "force emulation" prefix,
 *			     which is opt in via module param (off by default).
 *			     Bypasses EmulateOnUD restriction despite emulating
 *			     due to an intercepted #UD (see EMULTYPE_TRAP_UD).
 *			     Used to test the full emulator from userspace.
 *
 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
 *			backdoor emulation, which is opt in via module param.
 *			VMware backdoor emulation handles select instructions
 *			and reinjects the #GP for all other cases.
 */
#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_ALLOW_RETRY	    (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED	    (1 << 4)
#define EMULTYPE_VMWARE_GP	    (1 << 5)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len);
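
/*
 * Usage sketch (illustrative): resuming a previously decoded instruction
 * once userspace I/O has completed, without re-fetching or re-decoding:
 *
 *	return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
 *
 * whereas a freshly intercepted #UD would be handled with EMULTYPE_TRAP_UD.
 */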

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);

struct x86_emulate_ctxt;

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}
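
/*
 * Usage sketch (illustrative): each source ORs its own level into the
 * shared line state, so the line stays asserted until every source has
 * dropped it:
 *
 *	__kvm_irq_line_state(&state, 0, 1);	// returns 1, line asserted
 *	__kvm_irq_line_state(&state, 1, 1);	// returns 1
 *	__kvm_irq_line_state(&state, 0, 0);	// returns 1, source 1 still set
 *	__kvm_irq_line_state(&state, 1, 0);	// returns 0, line deasserted
 */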

#define KVM_MMU_ROOT_CURRENT		BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
#define KVM_MMU_ROOTS_ALL		(~0UL)
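
/*
 * Example (illustrative): a roots_to_free mask for kvm_mmu_free_roots()
 * that drops the active root plus cached previous root 1 is
 * KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(1) == BIT(0) | BIT(2);
 * KVM_MMU_ROOTS_ALL frees the current root and all cached roots.
 */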

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK		(1 << 6)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
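
/*
 * Note (illustrative): this works because 'smm' occupies the top byte of
 * kvm_mmu_page_role, so (role).smm is 0 or 1 and directly selects one of
 * the two address spaces (normal vs. SMM) declared above.
 */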

asmlinkage void kvm_spurious_fault(void);

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Usually after catching the fault we just panic; during reboot
 * instead the instruction is ignored.
 */
#define __kvm_handle_fault_on_reboot(insn)				\
	"666: \n\t"							\
	insn "\n\t"							\
	"jmp	668f \n\t"						\
	"667: \n\t"							\
	"call	kvm_spurious_fault \n\t"				\
	"668: \n\t"							\
	_ASM_EXTABLE(666b, 667b)
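
/*
 * Usage sketch (illustrative): wrapping a VMX instruction so that a fault
 * (e.g. the instruction executing after a reboot has already disabled
 * virtualization) is routed to kvm_spurious_fault() via the exception
 * table instead of crashing:
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxoff"));
 */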

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);

static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
	/* We can only post Fixed and LowPrio IRQs */
	return (irq->delivery_mode == dest_Fixed ||
		irq->delivery_mode == dest_LowestPrio);
}

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_blocking)
		kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_unblocking)
		kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

#define put_smstate(type, buf, offset, val)                      \
	*(type *)((buf) + (offset) - 0x7e00) = val

#define GET_SMSTATE(type, buf, offset)		\
	(*(type *)((buf) + (offset) - 0x7e00))

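/*
 * Worked example (illustrative): save-state fields are addressed by their
 * documented SMRAM offsets in the 0x7e00-0x7fff range, while the buffer
 * holds only those 512 bytes, hence the -0x7e00 adjustment:
 *
 *	put_smstate(u32, buf, 0x7f7c, val);	// writes buf[0x17c..0x17f]
 *	val = GET_SMSTATE(u32, buf, 0x7f7c);
 */
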
#endif /* _ASM_X86_KVM_HOST_H */