/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

11 12
#include <linux/types.h>
#include <linux/mm.h>
13
#include <linux/mmu_notifier.h>
14
#include <linux/tracepoint.h>
15
#include <linux/cpumask.h>
16
#include <linux/irq_work.h>
17
#include <linux/irq.h>
18 19 20

#include <linux/kvm.h>
#include <linux/kvm_para.h>
21
#include <linux/kvm_types.h>
22
#include <linux/perf_event.h>
23 24
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
F
Feng Wu 已提交
25
#include <linux/irqbypass.h>
26
#include <linux/hyperv.h>
27

28
#include <asm/apic.h>
29
#include <asm/pvclock-abi.h>
30
#include <asm/desc.h>
S
Sheng Yang 已提交
31
#include <asm/mtrr.h>
32
#include <asm/msr-index.h>
33
#include <asm/asm.h>
34
#include <asm/kvm_page_track.h>
35
#include <asm/kvm_vcpu_regs.h>
36
#include <asm/hyperv-tlfs.h>
37

38 39
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
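/* Arithmetic check: 509 user slots + 3 private slots = 512 total, a power of two. */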

#define KVM_HALT_POLL_NS_DEFAULT 200000

#define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS

/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER		KVM_ARCH_REQ(0)
#define KVM_REQ_REPORT_TPR_ACCESS	KVM_ARCH_REQ(1)
#define KVM_REQ_TRIPLE_FAULT		KVM_ARCH_REQ(2)
#define KVM_REQ_MMU_SYNC		KVM_ARCH_REQ(3)
#define KVM_REQ_CLOCK_UPDATE		KVM_ARCH_REQ(4)
#define KVM_REQ_LOAD_CR3		KVM_ARCH_REQ(5)
#define KVM_REQ_EVENT			KVM_ARCH_REQ(6)
#define KVM_REQ_APF_HALT		KVM_ARCH_REQ(7)
#define KVM_REQ_STEAL_UPDATE		KVM_ARCH_REQ(8)
#define KVM_REQ_NMI			KVM_ARCH_REQ(9)
#define KVM_REQ_PMU			KVM_ARCH_REQ(10)
#define KVM_REQ_PMI			KVM_ARCH_REQ(11)
#define KVM_REQ_SMI			KVM_ARCH_REQ(12)
#define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
#define KVM_REQ_MCLOCK_INPROGRESS \
	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_SCAN_IOAPIC \
	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_GLOBAL_CLOCK_UPDATE	KVM_ARCH_REQ(16)
#define KVM_REQ_APIC_PAGE_RELOAD \
	KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HV_CRASH		KVM_ARCH_REQ(18)
#define KVM_REQ_IOAPIC_EOI_EXIT		KVM_ARCH_REQ(19)
#define KVM_REQ_HV_RESET		KVM_ARCH_REQ(20)
#define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
#define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
#define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)

#define CR0_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR4_RESERVED_BITS                                               \
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
			  | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
			  | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)


#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
enum {
	PT_PAGE_TABLE_LEVEL   = 1,
	PT_DIRECTORY_LEVEL    = 2,
	PT_PDPE_LEVEL         = 3,
	/* set max level to the biggest one */
	PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
};
#define KVM_NR_PAGE_SIZES	(PT_MAX_HUGEPAGE_LEVEL - \
				 PT_PAGE_TABLE_LEVEL + 1)
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
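/*
 * Worked example (PAGE_SHIFT == 12): level 1 -> shift 12, 4 KiB pages;
 * level 2 -> shift 21, 2 MiB pages (512 base pages); level 3 -> shift 30,
 * 1 GiB pages (262144 base pages).
 */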

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
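/*
 * Example: in a slot with base_gfn 0x400, gfn 0x600 at PT_DIRECTORY_LEVEL
 * (shift 9) maps to index (0x600 >> 9) - (0x400 >> 9) = 3 - 2 = 1.
 */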

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

enum kvm_reg {
	VCPU_REGS_RAX = __VCPU_REGS_RAX,
	VCPU_REGS_RCX = __VCPU_REGS_RCX,
	VCPU_REGS_RDX = __VCPU_REGS_RDX,
	VCPU_REGS_RBX = __VCPU_REGS_RBX,
	VCPU_REGS_RSP = __VCPU_REGS_RSP,
	VCPU_REGS_RBP = __VCPU_REGS_RBP,
	VCPU_REGS_RSI = __VCPU_REGS_RSI,
	VCPU_REGS_RDI = __VCPU_REGS_RDI,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8  = __VCPU_REGS_R8,
	VCPU_REGS_R9  = __VCPU_REGS_R9,
	VCPU_REGS_R10 = __VCPU_REGS_R10,
	VCPU_REGS_R11 = __VCPU_REGS_R11,
	VCPU_REGS_R12 = __VCPU_REGS_R12,
	VCPU_REGS_R13 = __VCPU_REGS_R13,
	VCPU_REGS_R14 = __VCPU_REGS_R14,
	VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS,

	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

enum exit_fastpath_completion {
	EXIT_FASTPATH_NONE,
	EXIT_FASTPATH_SKIP_EMUL_INS,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_BT		(1 << 15)
#define DR6_RTM		(1 << 16)
#define DR6_FIXED_1	0xfffe0ff0
#define DR6_INIT	0xffff0ff0
#define DR6_VOLATILE	0x0001e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff2bff

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
				 PFERR_WRITE_MASK |		\
				 PFERR_PRESENT_MASK)

/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC	0
/*
 * The following bit is set with PV-EOI and cleared on EOI.
 * We detect PV-EOI changes made by the guest by comparing
 * this bit with the PV-EOI value in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING	1

struct kvm_kernel_irq_routing_entry;

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
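/*
 * Usage sketch (illustrative, not the exact helper names): the cache is
 * topped up with regular allocations before mmu_lock is taken, and objects
 * are then popped from objects[] while handling the fault, so the fault
 * path itself never sees an allocation failure.
 */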

/*
 * The pages used as guest page tables in the soft MMU are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits
 * used by an indirect shadow page cannot exceed 15 bits.
 *
 * Currently we use 14 bits: @level, @gpte_is_8_bytes, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
	u32 word;
	struct {
		unsigned level:4;
		unsigned gpte_is_8_bytes:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;
		unsigned :6;

		/*
		 * This is left at the top of the word so that
		 * kvm_memslots_for_spte_role can extract it with a
		 * simple shift.  While there is room, give it a whole
		 * byte so it is also faster to load it from memory.
		 */
		unsigned smm:8;
	};
};

/*
 * This structure complements kvm_mmu_page_role, caching everything needed
 * for MMU configuration. If nothing in either structure changed, MMU
 * re-configuration can be skipped. The @valid bit is set on first use so
 * that an all-zero structure is not treated as valid data.
 */
union kvm_mmu_extended_role {
	u32 word;
	struct {
		unsigned int valid:1;
		unsigned int execonly:1;
		unsigned int cr0_pg:1;
		unsigned int cr4_pae:1;
		unsigned int cr4_pse:1;
		unsigned int cr4_pke:1;
		unsigned int cr4_smap:1;
		unsigned int cr4_smep:1;
		unsigned int cr4_la57:1;
		unsigned int maxphyaddr:6;
	};
};

union kvm_mmu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
		union kvm_mmu_extended_role ext;
	};
};

struct kvm_rmap_head {
	unsigned long val;
};
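/*
 * Note (implementation detail, as I understand it): @val either points
 * directly at a single spte, or, with its low bit set, at a pte_list_desc
 * chaining multiple sptes for the same gfn.
 */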

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equivalent large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	int root_count;          /* Currently serving as active root */
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page.  */
	atomic_t write_flooding_count;
};

struct kvm_pio_request {
	unsigned long linear_rip;
	unsigned long count;
	int in;
	int port;
	int size;
};

#define PT64_ROOT_MAX_LEVEL 5

struct rsvd_bits_validate {
	u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
	u64 bad_mt_xwr;
};

struct kvm_mmu_root_info {
	gpa_t cr3;
	hpa_t hpa;
};

#define KVM_MMU_ROOT_INFO_INVALID \
	((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })

#define KVM_MMU_NUM_PREV_ROOTS 3

/*
 * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
 * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
	int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
			    u32 access, struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			       struct x86_exception *exception);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
	hpa_t root_hpa;
	gpa_t root_cr3;
	union kvm_mmu_role mmu_role;
	u8 root_level;
	u8 shadow_root_level;
	u8 ept_ad;
	bool direct_map;
	struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];

	/*
	 * Bitmap; bit set = permission fault
	 * Byte index: page fault error code [4:1]
	 * Bit index: pte permissions in ACC_* format
	 */
	u8 permissions[16];
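	/*
	 * Worked example: a user-mode write fault has PFEC.W (bit 1) and
	 * PFEC.U (bit 2) set, so bits [4:1] select byte index 0b0011 = 3.
	 */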

	/*
	 * The pkru_mask indicates if protection key checks are needed.  It
	 * consists of 16 domains indexed by page fault error code bits [4:1],
	 * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
	 * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
	 */
	u32 pkru_mask;

	u64 *pae_root;
	u64 *lm_root;

	/*
	 * Check zero bits in shadow page table entries; these bits
	 * include not only hardware-reserved bits but also bits the
	 * SPTE never uses.
	 */
	struct rsvd_bits_validate shadow_zero_check;

	struct rsvd_bits_validate guest_rsvd_check;

	/* Can have large pages at levels 2..last_nonleaf_level-1. */
	u8 last_nonleaf_level;

	bool nx;

	u64 pdptrs[4]; /* pae */
};

struct kvm_tlb_range {
	u64 start_gfn;
	u64 pages;
};

enum pmc_type {
	KVM_PMC_GP = 0,
	KVM_PMC_FIXED,
};

struct kvm_pmc {
	enum pmc_type type;
	u8 idx;
	u64 counter;
	u64 eventsel;
	struct perf_event *perf_event;
	struct kvm_vcpu *vcpu;
	/*
	 * eventsel value for general purpose counters,
	 * ctrl value for fixed counters.
	 */
	u64 current_config;
};

struct kvm_pmu {
	unsigned nr_arch_gp_counters;
	unsigned nr_arch_fixed_counters;
	unsigned available_event_types;
	u64 fixed_ctr_ctrl;
	u64 global_ctrl;
	u64 global_status;
	u64 global_ovf_ctrl;
	u64 counter_bitmask[2];
	u64 global_ctrl_mask;
	u64 global_ovf_ctrl_mask;
	u64 reserved_bits;
	u8 version;
	struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
	struct irq_work irq_work;
	DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
	DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);

	/*
	 * Gates the release of perf_events not marked in pmc_in_use,
	 * so that it happens only once per vCPU time slice.
	 */
	bool need_cleanup;

	/*
	 * The total number of programmed perf_events; helps avoid a
	 * redundant check before cleanup if the guest doesn't use the
	 * vPMU at all.
	 */
	u8 event_count;
};

struct kvm_pmu_ops;

enum {
	KVM_DEBUGREG_BP_ENABLED = 1,
	KVM_DEBUGREG_WONT_EXIT = 2,
	KVM_DEBUGREG_RELOAD = 4,
};

struct kvm_mtrr_range {
	u64 base;
	u64 mask;
	struct list_head node;
};

struct kvm_mtrr {
	struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
	mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
	u64 deftype;

	struct list_head head;
};

/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
	struct hrtimer timer;
	int index;
	union hv_stimer_config config;
	u64 count;
	u64 exp_time;
	struct hv_message msg;
	bool msg_pending;
};

/* Hyper-V synthetic interrupt controller (SynIC)*/
struct kvm_vcpu_hv_synic {
	u64 version;
	u64 control;
	u64 msg_page;
	u64 evt_page;
	atomic64_t sint[HV_SYNIC_SINT_COUNT];
	atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
	DECLARE_BITMAP(auto_eoi_bitmap, 256);
	DECLARE_BITMAP(vec_bitmap, 256);
	bool active;
	bool dont_zero_synic_pages;
};

/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
	u32 vp_index;
	u64 hv_vapic;
	s64 runtime_offset;
	struct kvm_vcpu_hv_synic synic;
	struct kvm_hyperv_exit exit;
	struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
	DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	cpumask_t tlb_flush;
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
	bool apicv_active;
	bool load_eoi_exitmap_pending;
	DECLARE_BITMAP(ioapic_handled_vectors, 256);
	unsigned long apic_attention;
	int32_t apic_arb_prio;
	int mp_state;
	u64 ia32_misc_enable_msr;
	u64 smbase;
	u64 smi_count;
	bool tpr_access_reporting;
	bool xsaves_enabled;
	u64 ia32_xss;
	u64 microcode_version;
	u64 arch_capabilities;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two level paging this still saves
	 * the paging mode of the l1 guest. This context is always used to
	 * handle faults.
	 */
	struct kvm_mmu *mmu;

	/* Non-nested MMU for L1 */
	struct kvm_mmu root_mmu;

	/* L1 MMU when running nested */
	struct kvm_mmu guest_mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	/*
	 * QEMU userspace and the guest each have their own FPU state.
	 * In vcpu_run, we switch between the user and guest FPU contexts.
	 * While running a VCPU, the VCPU thread will have the guest FPU
	 * context.
	 *
	 * Note that while the PKRU state lives inside the fpu registers,
	 * it is switched out separately at VMENTER and VMEXIT time. The
	 * "guest_fpu" state here contains the guest FPU context, with the
	 * host PKRU bits.
	 */
	struct fpu *user_fpu;
	struct fpu *guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;
	u32 guest_xstate_size;

	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool injected;
		bool has_error_code;
		u8 nr;
		u32 error_code;
		unsigned long payload;
		bool has_payload;
		u8 nested_apf;
	} exception;

	struct kvm_queued_interrupt {
		bool injected;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	int maxphyaddr;

	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;
	bool emulate_regs_need_sync_to_vcpu;
	bool emulate_regs_need_sync_from_vcpu;
	int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	struct gfn_to_hva_cache pv_time;
	bool pv_time_enabled;
	/* set guest stopped flag in pvclock flags field */
	bool pvclock_set_guest_stopped_request;

	struct {
		u8 preempted;
		u64 msr_val;
		u64 last_steal;
		struct gfn_to_pfn_cache cache;
	} st;

	u64 tsc_offset;
	u64 last_guest_tsc;
	u64 last_host_tsc;
	u64 tsc_offset_adjustment;
	u64 this_tsc_nsec;
	u64 this_tsc_write;
	u64 this_tsc_generation;
	bool tsc_catchup;
	bool tsc_always_catchup;
	s8 virtual_tsc_shift;
	u32 virtual_tsc_mult;
	u32 virtual_tsc_khz;
	s64 ia32_tsc_adjust_msr;
	u64 msr_ia32_power_ctl;
	u64 tsc_scaling_ratio;

	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
	unsigned nmi_pending; /* NMI queued after currently running handler */
	bool nmi_injected;    /* Trying to inject an NMI this entry */
	bool smi_pending;    /* SMI queued after currently running handler */

	struct kvm_mtrr mtrr_state;
	u64 pat;

	unsigned switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];
	unsigned long guest_debug_dr7;
	u64 msr_platform_info;
	u64 msr_misc_features_enables;

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 mcg_ext_ctl;
	u64 *mce_banks;

	/* Cache MMIO info */
	u64 mmio_gva;
	unsigned mmio_access;
	gfn_t mmio_gfn;
	u64 mmio_gen;

	struct kvm_pmu pmu;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	struct kvm_vcpu_hv hyperv;

	cpumask_var_t wbinvd_dirty_mask;

	unsigned long last_retry_eip;
	unsigned long last_retry_addr;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
		u32 host_apf_reason;
		unsigned long nested_apf_token;
		bool delivery_as_pf_vmexit;
	} apf;

	/* OSVW MSRs (AMD only) */
	struct {
		u64 length;
		u64 status;
	} osvw;

	struct {
		u64 msr_val;
		struct gfn_to_hva_cache data;
	} pv_eoi;

	u64 msr_kvm_poll_control;

	/*
	 * Indicates whether the access faulted on its page table in the
	 * guest; set when fixing a page fault and used to detect
	 * unhandleable instructions.
	 */
	bool write_fault_to_shadow_pgtable;

	/* set at EPT violation at this point */
	unsigned long exit_qualification;

	/* pv related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	int pending_ioapic_eoi;
	int pending_external_vector;

	/* GPA available */
	bool gpa_available;
	gpa_t gpa_val;

	/* be preempted when it's in kernel-mode(cpl=0) */
	bool preempted_in_kernel;

	/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
	bool l1tf_flush_l1d;

	/* AMD MSRC001_0015 Hardware Configuration */
	u64 msr_hwcr;
};

struct kvm_lpage_info {
	int disallow_lpage;
};

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};

/*
 * We use as the mode the number of bits allocated in the LDR for the
 * logical processor ID.  It happens that these are all powers of two.
 * This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16
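/*
 * Example: xAPIC flat mode allocates 8 LDR bits (one per CPU), hence 8;
 * xAPIC cluster mode allocates 4 bits within each cluster, hence 4;
 * x2APIC allocates 16 bits within each cluster, hence 16.
 */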

struct kvm_apic_map {
	struct rcu_head rcu;
	u8 mode;
	u32 max_apic_id;
	union {
		struct kvm_lapic *xapic_flat_map[8];
		struct kvm_lapic *xapic_cluster_map[16][4];
	};
	struct kvm_lapic *phys_map[];
};

/* Hyper-V emulation context */
struct kvm_hv {
	struct mutex hv_lock;
	u64 hv_guest_os_id;
	u64 hv_hypercall;
	u64 hv_tsc_page;

	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
	u64 hv_crash_ctl;

	HV_REFERENCE_TSC_PAGE tsc_ref;

	struct idr conn_to_evt;

	u64 hv_reenlightenment_control;
	u64 hv_tsc_emulation_control;
	u64 hv_tsc_emulation_status;

	/* How many vCPUs have VP index != vCPU index */
	atomic_t num_mismatched_vp_indexes;

	struct hv_partition_assist_pg *hv_pa_pg;
};

enum kvm_irqchip_mode {
	KVM_IRQCHIP_NONE,
	KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
	KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};

#define APICV_INHIBIT_REASON_DISABLE    0

struct kvm_arch {
	unsigned long n_used_mmu_pages;
	unsigned long n_requested_mmu_pages;
	unsigned long n_max_mmu_pages;
	unsigned int indirect_shadow_pages;
	u8 mmu_valid_gen;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct list_head zapped_obsolete_pages;
	struct list_head lpage_disallowed_mmu_pages;
	struct kvm_page_track_notifier_node mmu_sp_tracker;
	struct kvm_page_track_notifier_head track_notifier_head;

	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
	atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
	atomic_t assigned_device_count;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	atomic_t vapics_in_nmi_mode;
	struct mutex apic_map_lock;
	struct kvm_apic_map *apic_map;

	bool apic_access_page_done;
	unsigned long apicv_inhibit_reasons;

	gpa_t wall_clock;

	bool mwait_in_guest;
	bool hlt_in_guest;
	bool pause_in_guest;
	bool cstate_in_guest;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	u32 last_tsc_khz;
	u64 cur_tsc_nsec;
	u64 cur_tsc_write;
	u64 cur_tsc_offset;
	u64 cur_tsc_generation;
	int nr_vcpus_matched_tsc;

	spinlock_t pvclock_gtod_sync_lock;
	bool use_master_clock;
	u64 master_kernel_ns;
	u64 master_cycle_now;
	struct delayed_work kvmclock_update_work;
	struct delayed_work kvmclock_sync_work;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* reads protected by irq_srcu, writes by irq_lock */
	struct hlist_head mask_notifier_list;

	struct kvm_hv hyperv;

	#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
	#endif

	bool backwards_tsc_observed;
	bool boot_vcpu_runs_old_kvmclock;
	u32 bsp_vcpu_id;

	u64 disabled_quirks;

	enum kvm_irqchip_mode irqchip_mode;
	u8 nr_reserved_ioapic_pins;

	bool disabled_lapic_found;

	bool x2apic_format;
	bool x2apic_broadcast_quirk_disabled;

	bool guest_can_read_msr_platform_info;
	bool exception_payload_enabled;

	struct kvm_pmu_event_filter *pmu_event_filter;
	struct task_struct *nx_lpage_recovery_thread;
};

struct kvm_vm_stat {
	ulong mmu_shadow_zapped;
	ulong mmu_pte_write;
	ulong mmu_pte_updated;
	ulong mmu_pde_zapped;
	ulong mmu_flooded;
	ulong mmu_recycled;
	ulong mmu_cache_miss;
	ulong mmu_unsync;
	ulong remote_tlb_flush;
	ulong lpages;
	ulong nx_lpage_splits;
	ulong max_mmu_page_hash_collisions;
};

struct kvm_vcpu_stat {
	u64 pf_fixed;
	u64 pf_guest;
	u64 tlb_flush;
	u64 invlpg;

	u64 exits;
	u64 io_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 irq_window_exits;
	u64 nmi_window_exits;
	u64 l1d_flush;
	u64 halt_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 request_irq_exits;
	u64 irq_exits;
	u64 host_state_reload;
	u64 fpu_reload;
	u64 insn_emulation;
	u64 insn_emulation_fail;
	u64 hypercalls;
	u64 irq_injections;
	u64 nmi_injections;
	u64 req_event;
};

struct x86_instruction_info;

struct msr_data {
	bool host_initiated;
	u32 index;
	u64 data;
};

struct kvm_lapic_irq {
	u32 vector;
	u16 delivery_mode;
	u16 dest_mode;
	bool level;
	u16 trig_mode;
	u32 shorthand;
	u32 dest_id;
	bool msi_redir_hint;
};

static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
{
	return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
}

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_processor_compatibility)(void);/* __init */
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	bool (*has_emulated_msr)(int index);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	struct kvm *(*vm_alloc)(void);
	void (*vm_free)(struct kvm *);
	int (*vm_init)(struct kvm *kvm);
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
	int  (*tlb_remote_flush)(struct kvm *kvm);
	int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
			struct kvm_tlb_range *range);

	/*
	 * Flush any TLB entries associated with the given GVA.
	 * Does not need to flush GPA->HPA mappings.
	 * Can potentially get non-canonical addresses through INVLPGs, which
	 * the implementation may choose to ignore if appropriate.
	 */
	void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu,
		enum exit_fastpath_completion exit_fastpath);
	int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
	bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
	void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
	int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	bool (*invpcid_supported)(void);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
	/* Returns actual tsc_offset set in active VMCS */
	u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

	int (*check_intercept)(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage);
	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
		enum exit_fastpath_completion *exit_fastpath);
	bool (*mpx_supported)(void);
	bool (*xsaves_supported)(void);
	bool (*umip_emulated)(void);
	bool (*pt_supported)(void);
	bool (*pku_supported)(void);

	int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
	void (*request_immediate_exit)(struct kvm_vcpu *vcpu);

	void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);

	/*
	 * Arch-specific dirty logging hooks. These hooks are only supposed to
	 * be valid if the specific arch has a hardware-accelerated dirty
	 * logging mechanism. Currently only for PML on VMX.
	 *
	 *  - slot_enable_log_dirty:
	 *	called when enabling log dirty mode for the slot.
	 *  - slot_disable_log_dirty:
	 *	called when disabling log dirty mode for the slot.
	 *	also called when slot is created with log dirty disabled.
	 *  - flush_log_dirty:
	 *	called before reporting dirty_bitmap to userspace.
	 *  - enable_log_dirty_pt_masked:
	 *	called when reenabling log dirty for the GFNs in the mask after
	 *	corresponding bits are cleared in slot->dirty_bitmap.
	 */
	void (*slot_enable_log_dirty)(struct kvm *kvm,
				      struct kvm_memory_slot *slot);
	void (*slot_disable_log_dirty)(struct kvm *kvm,
				       struct kvm_memory_slot *slot);
	void (*flush_log_dirty)(struct kvm *kvm);
	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
					   struct kvm_memory_slot *slot,
					   gfn_t offset, unsigned long mask);
	int (*write_log_dirty)(struct kvm_vcpu *vcpu);

	/* pmu operations of sub-arch */
	const struct kvm_pmu_ops *pmu_ops;

	/*
	 * Architecture specific hooks for vCPU blocking due to
	 * HLT instruction.
	 * Returns for .pre_block():
	 *    - 0 means continue to block the vCPU.
	 *    - 1 means we cannot block the vCPU since some event
	 *        happens during this period, such as, 'ON' bit in
	 *        posted-interrupts descriptor is set.
	 */
	int (*pre_block)(struct kvm_vcpu *vcpu);
	void (*post_block)(struct kvm_vcpu *vcpu);

	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set);
	void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
	bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);

	int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
			    bool *expired);
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);

	int (*get_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				unsigned user_data_size);
	int (*set_nested_state)(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state);
	bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);

	int (*smi_allowed)(struct kvm_vcpu *vcpu);
	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
	int (*enable_smi_window)(struct kvm_vcpu *vcpu);

	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);

	int (*get_msr_feature)(struct kvm_msr_entry *entry);

	int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
				   uint16_t *vmcs_version);
	uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);

	bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);

	bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
	int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;
extern struct kmem_cache *x86_fpu_cache;

#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kvm_x86_ops->vm_alloc();
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	return kvm_x86_ops->vm_free(kvm);
}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	if (kvm_x86_ops->tlb_remote_flush &&
	    !kvm_x86_ops->tlb_remote_flush(kvm))
		return 0;
	else
		return -ENOTSUPP;
}
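/*
 * Note (assumption about the caller in common code): a -ENOTSUPP return is
 * expected to make the generic code fall back to its regular IPI-based
 * remote TLB flush.
 */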

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
		u64 acc_track_mask, u64 me_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			  const void *val, int bytes);

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32  kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8   kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64  kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64  kvm_default_tsc_scaling_ratio;

extern u64 kvm_mce_cap_supported;

/*
 * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
 *			userspace I/O) to indicate that the emulation context
 *			should be reused as is, i.e. skip initialization of
 *			emulation context, instruction fetch and decode.
 *
 * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
 *		      Indicates that only select instructions (tagged with
 *		      EmulateOnUD) should be emulated (to minimize the emulator
 *		      attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
 *
 * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
 *		   decode the instruction length.  For use *only* by
 *		   kvm_x86_ops->skip_emulated_instruction() implementations.
 *
 * EMULTYPE_ALLOW_RETRY - Set when the emulator should resume the guest to
 *			  retry native execution under certain conditions.
 *
 * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
 *			     triggered by KVM's magic "force emulation" prefix,
 *			     which is opt in via module param (off by default).
 *			     Bypasses EmulateOnUD restriction despite emulating
 *			     due to an intercepted #UD (see EMULTYPE_TRAP_UD).
 *			     Used to test the full emulator from userspace.
 *
 * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
 *			backdoor emulation, which is opt in via module param.
 *			VMware backdoor emulation handles select instructions
 *			and reinjects the #GP for all other cases.
 */
#define EMULTYPE_NO_DECODE	    (1 << 0)
#define EMULTYPE_TRAP_UD	    (1 << 1)
#define EMULTYPE_SKIP		    (1 << 2)
#define EMULTYPE_ALLOW_RETRY	    (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED	    (1 << 4)
#define EMULTYPE_VMWARE_GP	    (1 << 5)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len);

void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);

struct x86_emulate_ctxt;

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);

static inline int __kvm_irq_line_state(unsigned long *irq_state,
				       int irq_source_id, int level)
{
	/* Logical OR for level trig interrupt */
	if (level)
		__set_bit(irq_source_id, irq_state);
	else
		__clear_bit(irq_source_id, irq_state);

	return !!(*irq_state);
}
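/*
 * Example: with two level-triggered sources sharing a line, the line only
 * deasserts once both sources have cleared their bit in *irq_state.
 */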

#define KVM_MMU_ROOT_CURRENT		BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)	BIT(1+i)
#define KVM_MMU_ROOTS_ALL		(~0UL)
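/*
 * Illustrative use: kvm_mmu_free_roots(vcpu, mmu, KVM_MMU_ROOT_CURRENT)
 * frees only the active root, while KVM_MMU_ROOTS_ALL also drops every
 * entry in prev_roots[].
 */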

int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
bool kvm_apicv_activated(struct kvm *kvm);
void kvm_apicv_init(struct kvm *kvm, bool enable);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
{
	return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
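/* Arithmetic check: 0x68 + 32 + 8192 + 1 = 8329 bytes for the real-mode TSS. */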

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK		(1 << 6)
#define HF_SMM_INSIDE_NMI_MASK	(1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

asmlinkage void kvm_spurious_fault(void);

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Usually after catching the fault we just panic; during a reboot
 * the instruction is ignored instead.
 */
#define __kvm_handle_fault_on_reboot(insn)				\
	"666: \n\t"							\
	insn "\n\t"							\
	"jmp	668f \n\t"						\
	"667: \n\t"							\
	"call	kvm_spurious_fault \n\t"				\
	"668: \n\t"							\
	_ASM_EXTABLE(666b, 667b)
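/*
 * Usage sketch (illustrative): wrap a faultable virtualization instruction,
 * e.g. asm volatile(__kvm_handle_fault_on_reboot("vmxoff"));
 * a fault is redirected to label 667, which calls kvm_spurious_fault()
 * rather than oopsing on the instruction itself.
 */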

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
				       unsigned long *vcpu_bitmap);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
			     struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		     struct kvm_lapic_irq *irq);

static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
	/* We can only post Fixed and LowPrio IRQs */
	return (irq->delivery_mode == dest_Fixed ||
		irq->delivery_mode == dest_LowestPrio);
}

static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_blocking)
		kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->vcpu_unblocking)
		kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
	return default_cpu_present_to_apicid(mps_cpu);
#else
	WARN_ON_ONCE(1);
	return BAD_APICID;
#endif
}

#define put_smstate(type, buf, offset, val)                      \
	*(type *)((buf) + (offset) - 0x7e00) = val

#define GET_SMSTATE(type, buf, offset)		\
	(*(type *)((buf) + (offset) - 0x7e00))
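/*
 * Worked example: the SMM handlers pass offsets in the 0x7e00-0x7fff
 * range, so GET_SMSTATE(u64, buf, 0x7f68) reads from buf + 0x168.
 */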

#endif /* _ASM_X86_KVM_HOST_H */