#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * If we support unaligned MMIO, at most one fragment will be split into two:
 */
#ifdef KVM_UNALIGNED_MMIO
#  define KVM_EXTRA_MMIO_FRAGMENTS 1
#else
#  define KVM_EXTRA_MMIO_FRAGMENTS 0
#endif

#define KVM_USER_MMIO_SIZE 8

#define KVM_MAX_MMIO_FRAGMENTS \
	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask those bits to indicate an error pfn.
 */
#define KVM_PFN_ERR_MASK	(0xfffULL << 52)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_BAD		(KVM_PFN_ERR_MASK + 2)

static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_ERR_BAD;
}

static inline bool is_invalid_pfn(pfn_t pfn)
{
	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
}

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17

#define KVM_USERSPACE_IRQ_SOURCE_ID	0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int                   dev_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
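
/*
 * Illustrative sketch, not part of this header: registering a device on
 * the MMIO bus.  "my_dev" and its struct kvm_io_device member "dev" are
 * assumed to be defined by the caller.
 *
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len,
 *				      &my_dev->dev);
 *	mutex_unlock(&kvm->slots_lock);
 */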

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	struct page *page;
	bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
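
/*
 * Illustrative sketch, not part of this header: a kicker can use the
 * cmpxchg above so that only the first kick of a vcpu still in guest
 * mode sends an IPI; later kicks observe EXITING_GUEST_MODE and skip
 * the IPI.  The target cpu is assumed to be known to the caller.
 *
 *	if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
 *		smp_send_reschedule(cpu);
 */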

/*
 * Some of the bitops functions do not support too long bitmaps;
 * this number must be chosen so that those limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	int user_alloc;
	int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
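
/*
 * Worked example (illustrative): on a 64-bit host a slot of 1000 pages
 * is rounded up to ALIGN(1000, 64) = 1024 bits of dirty bitmap,
 * i.e. 128 bytes.
 */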

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains a list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	int id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)
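
/*
 * Illustrative sketch, not part of this header: iterating over all
 * online vcpus, e.g. to kick each one out of guest mode.
 *
 *	struct kvm_vcpu *vcpu;
 *	int i;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */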

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}
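
/*
 * Illustrative sketch, not part of this header: readers access the
 * memslots under SRCU, the way the vcpu ioctl paths do.
 *
 *	int idx = srcu_read_lock(&kvm->srcu);
 *	struct kvm_memslots *slots = kvm_memslots(kvm);
 *
 *	... look up slots, e.g. with __gfn_to_memslot(slots, gfn) ...
 *
 *	srcu_read_unlock(&kvm->srcu, idx);
 */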

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t hva_to_pfn_atomic(unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn);
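
/*
 * Illustrative sketch, not part of this header: accessing a guest
 * structure through a gfn_to_hva_cache, in the style of the steal-time
 * code.  "gpa" is assumed to come from the guest.
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa))
 *		return -EFAULT;
 *	if (kvm_read_guest_cached(kvm, &ghc, &val, sizeof(val)))
 *		return -EFAULT;
 *	val |= 1;
 *	if (kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)))
 *		return -EFAULT;
 */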

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;
	int host_irq;
	bool host_irq_disabled;
	bool pci_2_3;
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	spinlock_t intx_mask_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
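
/*
 * Illustrative sketch, not part of this header: raising a guest irq line
 * from an emulated device using a dedicated irq source id.  "gsi" is
 * assumed to come from the device's configuration.
 *
 *	int src = kvm_request_irq_source_id(kvm);
 *
 *	if (src < 0)
 *		return src;
 *	kvm_set_irq(kvm, src, gsi, 1);	... assert the line
 *	kvm_set_irq(kvm, src, gsi, 0);	... deassert the line
 *	kvm_free_irq_source_id(kvm, src);
 */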

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
	BUG_ON(preemptible());
	account_system_vtime(current);
	current->flags |= PF_VCPU;
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from an rcu point of view.
	 * In addition a CPU may stay in guest mode for quite a long time (up
	 * to one time slice). Let's treat guest mode as a quiescent state,
	 * just like we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
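
/*
 * Illustrative sketch, not part of this header: the rough shape of an
 * arch vcpu run loop around these helpers (details vary by
 * architecture).
 *
 *	preempt_disable();
 *	kvm_guest_enter();
 *	... enter the guest via arch-specific code ...
 *	kvm_guest_exit();
 *	preempt_enable();
 */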

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		      gfn < memslot->base_gfn + memslot->npages)
			return memslot;

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
					       gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
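
/*
 * Worked example for gfn_to_index() (illustrative; assumes the x86
 * convention where KVM_HPAGE_GFN_SHIFT() is 0/9/18 for 4K/2M/1G pages):
 * with base_gfn = 0x400 and gfn = 0x7ff at the 2M level, both values
 * are shifted right by 9, giving (0x7ff >> 9) - (0x400 >> 9) = 3 - 2 = 1.
 */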

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock rather
	 * than under kvm->mmu_lock, for scalability, so it can't rely on
	 * kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
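
/*
 * Illustrative sketch, not part of this header: the canonical page-fault
 * pattern around mmu_notifier_retry(), as used by the arch MMU code.
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);	... may sleep
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	... invalidated meanwhile, retry
 *	... install the translation ...
 *	spin_unlock(&kvm->mmu_lock);
 */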

#ifdef KVM_CAP_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}
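
/*
 * Illustrative sketch, not part of this header: the producer/consumer
 * request pattern.  A producer posts a request and kicks the vcpu; the
 * vcpu run loop consumes it before re-entering the guest.
 * flush_guest_tlb() is a hypothetical arch-specific helper.
 *
 *	producer (any context):
 *		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *		kvm_vcpu_kick(vcpu);
 *
 *	consumer (vcpu run loop):
 *		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *			flush_guest_tlb(vcpu);
 */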

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif /* __KVM_HOST_H */