#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

/*
 * For a normal pfn, the highest 12 bits are zero, so bits 52-62 can be
 * used to flag an error pfn and bit 63 to flag a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but failed to
 * translate to a host pfn.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn can not be translated to a
 * pfn: either it is not in any slot, or the gfn-to-pfn translation
 * failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
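/*
 * Usage sketch (illustrative only): a caller that looks up a pfn with
 * gfn_to_pfn(), declared below, typically distinguishes the three
 * outcomes encoded above:
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		...		gfn is not covered by any memslot
 *	else if (is_error_pfn(pfn))
 *		...		gfn is in a slot, but the lookup failed
 *	else
 *		...		pfn is a usable host pfn
 */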

/*
 * Architectures whose KVM_HVA_ERR_BAD differs from PAGE_OFFSET (e.g. s390)
 * provide their own defines and their own kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif
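/*
 * Sketch (illustrative): gfn_to_hva(), declared below, returns
 * KVM_HVA_ERR_BAD on failure, so callers are expected to check:
 *
 *	unsigned long hva = gfn_to_hva(kvm, gfn);
 *
 *	if (kvm_is_error_hva(hva))
 *		return -EFAULT;
 */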

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}
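/*
 * e.g. (sketch): gfn_to_page(), declared below, returns
 * KVM_ERR_PTR_BAD_PAGE rather than NULL on failure:
 *
 *	struct page *page = gfn_to_page(kvm, gfn);
 *
 *	if (is_error_page(page))
 *		return -EFAULT;
 */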

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_PMU               15
#define KVM_REQ_PMI               16
#define KVM_REQ_WATCHDOG          17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS 19
#define KVM_REQ_EPR_EXIT          20
#define KVM_REQ_SCAN_IOAPIC       21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
#define KVM_REQ_ENABLE_IBS        23
#define KVM_REQ_DISABLE_IBS       24

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
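/*
 * Registration sketch (illustrative; "foo_dev" is a hypothetical device
 * emulation): a kvm_io_device is attached to one of the buses above
 * with kvm->slots_lock held, and detached on teardown:
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len,
 *				      &foo_dev->io_dev);
 *	mutex_unlock(&kvm->slots_lock);
 */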

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool   wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
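/*
 * e.g. a 1GB slot of 4KB pages has 262144 pages, so its dirty bitmap
 * occupies ALIGN(262144, BITS_PER_LONG) / 8 = 32768 bytes.
 */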

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
		struct kvm_s390_adapter_int adapter;
	};
	struct hlist_node link;
};

struct kvm_irq_routing_table;

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
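/*
 * Iteration sketch (illustrative): e.g. posting a request to every vcpu
 * and kicking it out of guest mode:
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm) {
 *		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *		kvm_vcpu_kick(vcpu);
 *	}
 */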

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};
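/*
 * A sketch of how __kvm_set_memory_region() might classify a request
 * (illustrative only; "old" and "new" are the current and requested
 * slot contents):
 *
 *	if (new.npages && !old.npages)
 *		change = KVM_MR_CREATE;
 *	else if (!new.npages && old.npages)
 *		change = KVM_MR_DELETE;
 *	else if (new.base_gfn != old.base_gfn)
 *		change = KVM_MR_MOVE;
 *	else if (new.flags != old.flags)
 *		change = KVM_MR_FLAGS_ONLY;
 */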

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	bool pci_2_3;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	spinlock_t intx_mask_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}
#endif

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	BUG_ON(preemptible());

	local_irq_save(flags);
	guest_enter();
	local_irq_restore(flags);

	/* KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode. In fact, switching to guest
	 * mode is very similar to exiting to userspace from RCU's point of
	 * view. In addition, the CPU may stay in guest mode for quite a
	 * long time (up to one time slice). Let's treat guest mode as a
	 * quiescent state, just like we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit();
	local_irq_restore(flags);
}
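/*
 * A hedged sketch of how an arch vcpu run loop brackets guest execution
 * with the helpers above (the VM entry itself is arch-specific):
 *
 *	preempt_disable();
 *	local_irq_disable();
 *	kvm_guest_enter();
 *	...			hardware VM entry/exit happens here
 *	kvm_guest_exit();
 *	local_irq_enable();
 *	preempt_enable();
 */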

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		      gfn < memslot->base_gfn + memslot->npages)
			return memslot;

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
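/*
 * e.g. for a slot with base_gfn = 0x100 and userspace_addr = 0x7f0000000000,
 * gfn 0x105 maps to hva 0x7f0000000000 + 5 * PAGE_SIZE.
 */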

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
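/*
 * Typical usage sketch (illustrative): an arch page-fault handler
 * snapshots mmu_notifier_seq before the sleepable pfn lookup, then
 * rechecks under mmu_lock before installing the mapping:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);		may sleep
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;		a notifier ran; retry
 *	... install the mapping ...
 *	spin_unlock(&kvm->mmu_lock);
 */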

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

void kvm_free_all_assigned_devices(struct kvm *kvm);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}
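/*
 * Request/acknowledge sketch (illustrative): a requester typically does
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * and the vcpu loop consumes pending requests before re-entering the
 * guest:
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		flush_guest_tlb(vcpu);		hypothetical arch hook
 */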

extern bool kvm_rebooting;

struct kvm_device_ops;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
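/*
 * Minimal backend sketch (illustrative; the "foo" names are
 * hypothetical). Note that destroy() frees dev itself, per the comment
 * above:
 *
 *	static int kvm_foo_create(struct kvm_device *dev, u32 type)
 *	{
 *		dev->private = kzalloc(sizeof(struct foo_state), GFP_KERNEL);
 *		return dev->private ? 0 : -ENOMEM;
 *	}
 *
 *	static void kvm_foo_destroy(struct kvm_device *dev)
 *	{
 *		kfree(dev->private);
 *		kfree(dev);
 *	}
 *
 *	struct kvm_device_ops kvm_foo_ops = {
 *		.name = "kvm-foo",
 *		.create = kvm_foo_create,
 *		.destroy = kvm_foo_destroy,
 *	};
 */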

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_vfio_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_flic_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif