x86.c 251.7 KB
Newer Older
1 2 3 4 5 6
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
B
Ben-Ami Yassour 已提交
7 8
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
N
Nicolas Kaiser 已提交
9
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 11 12 13
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
B
Ben-Ami Yassour 已提交
14 15
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
16 17 18 19 20 21
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

22
#include <linux/kvm_host.h>
23
#include "irq.h"
24
#include "mmu.h"
S
Sheng Yang 已提交
25
#include "i8254.h"
26
#include "tss.h"
27
#include "kvm_cache_regs.h"
28
#include "x86.h"
A
Avi Kivity 已提交
29
#include "cpuid.h"
30
#include "pmu.h"
31
#include "hyperv.h"
32

33
#include <linux/clocksource.h>
B
Ben-Ami Yassour 已提交
34
#include <linux/interrupt.h>
35 36 37
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
38 39
#include <linux/export.h>
#include <linux/moduleparam.h>
40
#include <linux/mman.h>
41
#include <linux/highmem.h>
J
Joerg Roedel 已提交
42
#include <linux/iommu.h>
B
Ben-Ami Yassour 已提交
43
#include <linux/intel-iommu.h>
44
#include <linux/cpufreq.h>
A
Avi Kivity 已提交
45
#include <linux/user-return-notifier.h>
46
#include <linux/srcu.h>
47
#include <linux/slab.h>
48
#include <linux/perf_event.h>
49
#include <linux/uaccess.h>
50
#include <linux/hash.h>
51
#include <linux/pci.h>
52 53
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
F
Feng Wu 已提交
54 55
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
56
#include <linux/sched/stat.h>
57
#include <linux/mem_encrypt.h>
58

A
Avi Kivity 已提交
59
#include <trace/events/kvm.h>
X
Xiao Guangrong 已提交
60

61
#include <asm/debugreg.h>
62
#include <asm/msr.h>
63
#include <asm/desc.h>
H
Huang Ying 已提交
64
#include <asm/mce.h>
65
#include <linux/kernel_stat.h>
66
#include <asm/fpu/internal.h> /* Ugh! */
67
#include <asm/pvclock.h>
68
#include <asm/div64.h>
69
#include <asm/irq_remapping.h>
70
#include <asm/mshyperv.h>
71
#include <asm/hypervisor.h>
72

73 74 75
#define CREATE_TRACE_POINTS
#include "trace.h"

76
#define MAX_IO_MSRS 256
H
Huang Ying 已提交
77
#define KVM_MAX_MCE_BANKS 32
78 79
u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
H
Huang Ying 已提交
80

81 82 83
#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

84 85 86 87 88
/* EFER defaults:
 * - enable syscall per default because its emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
89 90
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
91
#else
92
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
93
#endif
94

95 96
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
97

98 99
#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
                                    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
100

101
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
A
Avi Kivity 已提交
102
static void process_nmi(struct kvm_vcpu *vcpu);
103
static void enter_smm(struct kvm_vcpu *vcpu);
104
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
K
Ken Hofsass 已提交
105 106
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
107

108
struct kvm_x86_ops *kvm_x86_ops __read_mostly;
109
EXPORT_SYMBOL_GPL(kvm_x86_ops);
110

111
static bool __read_mostly ignore_msrs = 0;
112
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
113

114 115 116
static bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);

117
unsigned int min_timer_period_us = 200;
118 119
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

120 121 122
static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

123
bool __read_mostly kvm_has_tsc_control;
124
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
125
u32  __read_mostly kvm_max_guest_tsc_khz;
126
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
127 128 129 130
u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
u64  __read_mostly kvm_max_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
131 132
u64 __read_mostly kvm_default_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
133

134
/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
135
static u32 __read_mostly tsc_tolerance_ppm = 250;
136 137
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

138
/* lapic timer advance (tscdeadline mode only) in nanoseconds */
139
unsigned int __read_mostly lapic_timer_advance_ns = 1000;
140
module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
141
EXPORT_SYMBOL_GPL(lapic_timer_advance_ns);
142

143 144 145
static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);

146 147 148 149
bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);

150 151 152
static bool __read_mostly force_emulation_prefix = false;
module_param(force_emulation_prefix, bool, S_IRUGO);

A
Avi Kivity 已提交
153 154 155 156
#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
157
	u32 msrs[KVM_NR_SHARED_MSRS];
A
Avi Kivity 已提交
158 159 160 161 162
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
163 164 165 166
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
A
Avi Kivity 已提交
167 168 169
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
170
static struct kvm_shared_msrs __percpu *shared_msrs;
A
Avi Kivity 已提交
171

172
struct kvm_stats_debugfs_item debugfs_entries[] = {
173 174 175 176 177 178 179 180 181
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
182
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
183
	{ "halt_exits", VCPU_STAT(halt_exits) },
184
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
185
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
186
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
187
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
A
Amit Shah 已提交
188
	{ "hypercalls", VCPU_STAT(hypercalls) },
189 190 191 192 193 194
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
195
	{ "irq_injections", VCPU_STAT(irq_injections) },
196
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
197
	{ "req_event", VCPU_STAT(req_event) },
P
Paolo Bonzini 已提交
198
	{ "l1d_flush", VCPU_STAT(l1d_flush) },
A
Avi Kivity 已提交
199 200 201 202 203 204
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
A
Avi Kivity 已提交
205
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
206
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
207
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
M
Marcelo Tosatti 已提交
208
	{ "largepages", VM_STAT(lpages) },
209 210
	{ "max_mmu_page_hash_collisions",
		VM_STAT(max_mmu_page_hash_collisions) },
211 212 213
	{ NULL }
};

214 215
u64 __read_mostly host_xcr0;

216
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
217

218 219 220 221 222 223 224
static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

A
Avi Kivity 已提交
225 226 227 228 229
static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
230
	struct kvm_shared_msr_values *values;
231 232 233 234 235 236 237 238 239 240 241 242
	unsigned long flags;

	/*
	 * Disabling irqs at this point since the following code could be
	 * interrupted and executed through kvm_arch_hardware_disable()
	 */
	local_irq_save(flags);
	if (locals->registered) {
		locals->registered = false;
		user_return_notifier_unregister(urn);
	}
	local_irq_restore(flags);
A
Avi Kivity 已提交
243
	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
244 245 246 247
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
A
Avi Kivity 已提交
248 249 250 251
		}
	}
}

252
static void shared_msr_update(unsigned slot, u32 msr)
A
Avi Kivity 已提交
253 254
{
	u64 value;
255 256
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
A
Avi Kivity 已提交
257

258 259 260 261 262 263 264 265 266 267 268 269 270
	/* only read, and nobody should modify it at this time,
	 * so don't need lock */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
271
	BUG_ON(slot >= KVM_NR_SHARED_MSRS);
272
	shared_msrs_global.msrs[slot] = msr;
A
Avi Kivity 已提交
273 274 275 276 277 278 279 280 281 282
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
283
		shared_msr_update(i, shared_msrs_global.msrs[i]);
A
Avi Kivity 已提交
284 285
}

286
int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
A
Avi Kivity 已提交
287
{
288 289
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
290
	int err;
A
Avi Kivity 已提交
291

292
	if (((value ^ smsr->values[slot].curr) & mask) == 0)
293
		return 0;
294
	smsr->values[slot].curr = value;
295 296 297 298
	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
	if (err)
		return 1;

A
Avi Kivity 已提交
299 300 301 302 303
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
304
	return 0;
A
Avi Kivity 已提交
305 306 307
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

308
static void drop_user_return_notifiers(void)
309
{
310 311
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
312 313 314 315 316

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

317 318
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
319
	return vcpu->arch.apic_base;
320 321 322
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

323 324 325 326 327 328
enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
	return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

329 330
int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
331 332
	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
	enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
333 334
	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
335

336
	if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
337
		return 1;
338 339 340 341 342 343
	if (!msr_info->host_initiated) {
		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
			return 1;
		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
			return 1;
	}
344 345 346

	kvm_lapic_set_base(vcpu, msr_info->data);
	return 0;
347 348 349
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

350
asmlinkage __visible void kvm_spurious_fault(void)
351 352 353 354 355 356
{
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377
#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402
#define EXCPT_FAULT		0
#define EXCPT_TRAP		1
#define EXCPT_ABORT		2
#define EXCPT_INTERRUPT		3

static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	/* #DB is trap, as instruction watchpoints are handled elsewhere */
	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	/* Reserved exceptions will result in fault */
	return EXCPT_FAULT;
}

403 404 405 406 407 408 409 410 411 412
void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
{
	unsigned nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (!has_payload)
		return;

	switch (nr) {
413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434
	case DB_VECTOR:
		/*
		 * "Certain debug exceptions may clear bit 0-3.  The
		 * remaining contents of the DR6 register are never
		 * cleared by the processor".
		 */
		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
		/*
		 * DR6.RTM is set by all #DB exceptions that don't clear it.
		 */
		vcpu->arch.dr6 |= DR6_RTM;
		vcpu->arch.dr6 |= payload;
		/*
		 * Bit 16 should be set in the payload whenever the #DB
		 * exception should clear DR6.RTM. This makes the payload
		 * compatible with the pending debug exceptions under VMX.
		 * Though not currently documented in the SDM, this also
		 * makes the payload compatible with the exit qualification
		 * for #DB exceptions under VMX.
		 */
		vcpu->arch.dr6 ^= payload & DR6_RTM;
		break;
435 436 437 438 439 440 441 442 443 444
	case PF_VECTOR:
		vcpu->arch.cr2 = payload;
		break;
	}

	vcpu->arch.exception.has_payload = false;
	vcpu->arch.exception.payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);

445
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
446
		unsigned nr, bool has_error, u32 error_code,
447
	        bool has_payload, unsigned long payload, bool reinject)
448 449 450 451
{
	u32 prev_nr;
	int class1, class2;

452 453
	kvm_make_request(KVM_REQ_EVENT, vcpu);

454
	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
455
	queue:
456 457
		if (has_error && !is_protmode(vcpu))
			has_error = false;
458 459 460 461 462 463 464 465 466 467 468
		if (reinject) {
			/*
			 * On vmentry, vcpu->arch.exception.pending is only
			 * true if an event injection was blocked by
			 * nested_run_pending.  In that case, however,
			 * vcpu_enter_guest requests an immediate exit,
			 * and the guest shouldn't proceed far enough to
			 * need reinjection.
			 */
			WARN_ON_ONCE(vcpu->arch.exception.pending);
			vcpu->arch.exception.injected = true;
469 470 471 472 473 474 475 476
			if (WARN_ON_ONCE(has_payload)) {
				/*
				 * A reinjected event has already
				 * delivered its payload.
				 */
				has_payload = false;
				payload = 0;
			}
477 478 479 480
		} else {
			vcpu->arch.exception.pending = true;
			vcpu->arch.exception.injected = false;
		}
481 482 483
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
484 485
		vcpu->arch.exception.has_payload = has_payload;
		vcpu->arch.exception.payload = payload;
486 487 488
		/*
		 * In guest mode, payload delivery should be deferred,
		 * so that the L1 hypervisor can intercept #PF before
489 490 491 492 493 494 495
		 * CR2 is modified (or intercept #DB before DR6 is
		 * modified under nVMX).  However, for ABI
		 * compatibility with KVM_GET_VCPU_EVENTS and
		 * KVM_SET_VCPU_EVENTS, we can't delay payload
		 * delivery unless userspace has enabled this
		 * functionality via the per-VM capability,
		 * KVM_CAP_EXCEPTION_PAYLOAD.
496 497 498 499
		 */
		if (!vcpu->kvm->arch.exception_payload_enabled ||
		    !is_guest_mode(vcpu))
			kvm_deliver_exception_payload(vcpu);
500 501 502 503 504 505 506
		return;
	}

	/* to check exception */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
507
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
508 509 510 511 512 513
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
514 515 516 517 518
		/*
		 * Generate double fault per SDM Table 5-5.  Set
		 * exception.pending = true so that the double fault
		 * can trigger a nested vmexit.
		 */
519
		vcpu->arch.exception.pending = true;
520
		vcpu->arch.exception.injected = false;
521 522 523
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
524 525
		vcpu->arch.exception.has_payload = false;
		vcpu->arch.exception.payload = 0;
526 527 528 529 530 531 532
	} else
		/* replace previous exception with a new one in a hope
		   that instruction re-execution will regenerate lost
		   exception */
		goto queue;
}

533 534
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
535
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
536 537 538
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

539 540
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
541
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
542 543 544
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

545 546 547 548 549 550
static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
				  unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}

551 552 553 554 555 556 557
static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
				    u32 error_code, unsigned long payload)
{
	kvm_multiple_exception(vcpu, nr, true, error_code,
			       true, payload, false);
}

558
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
559
{
560 561 562
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
563 564 565
		return kvm_skip_emulated_instruction(vcpu);

	return 1;
566 567
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
568

569
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
570 571
{
	++vcpu->stat.pf_guest;
572 573
	vcpu->arch.exception.nested_apf =
		is_guest_mode(vcpu) && fault->async_page_fault;
574
	if (vcpu->arch.exception.nested_apf) {
575
		vcpu->arch.apf.nested_apf_token = fault->address;
576 577 578 579 580
		kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
	} else {
		kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
					fault->address);
	}
581
}
N
Nadav Har'El 已提交
582
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
583

584
static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
585
{
586 587
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
588
	else
589
		vcpu->arch.mmu->inject_page_fault(vcpu, fault);
590 591

	return fault->nested_page_fault;
592 593
}

594 595
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
A
Avi Kivity 已提交
596 597
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
598 599 600
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

601 602
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
603
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
604 605 606
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

607 608
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
609
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
610 611 612
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

613 614 615 616 617
/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
618
{
619 620 621 622
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
623
}
624
EXPORT_SYMBOL_GPL(kvm_require_cpl);
625

626 627 628 629 630 631 632 633 634 635
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

636 637
/*
 * This function will be used to read from the physical memory of the currently
638
 * running guest. The difference to kvm_vcpu_read_guest_page is that this function
639 640 641 642 643 644
 * can read from guest physical or from the guest's guest physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
645
	struct x86_exception exception;
646 647 648 649
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa     = gfn_to_gpa(ngfn);
650
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
651 652 653 654 655
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

656
	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
657 658 659
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

660
static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
661 662 663 664 665 666
			       void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}

667 668 669
/*
 * Load the pae pdptrs.  Return true is they are all valid.
 */
670
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
671 672 673 674 675
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
676
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
677

678 679 680
	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
681 682 683 684 685
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
B
Bandan Das 已提交
686
		if ((pdpte[i] & PT_PRESENT_MASK) &&
687
		    (pdpte[i] &
688
		     vcpu->arch.mmu->guest_rsvd_check.rsvd_bits_mask[0][2])) {
689 690 691 692 693 694
			ret = 0;
			goto out;
		}
	}
	ret = 1;

695
	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
A
Avi Kivity 已提交
696 697 698 699
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
700 701 702 703
out:

	return ret;
}
704
EXPORT_SYMBOL_GPL(load_pdptrs);
705

706
bool pdptrs_changed(struct kvm_vcpu *vcpu)
707
{
708
	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
709
	bool changed = true;
710 711
	int offset;
	gfn_t gfn;
712 713
	int r;

714
	if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu))
715 716
		return false;

A
Avi Kivity 已提交
717 718 719 720
	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

721 722
	gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
	offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
723 724
	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
				       PFERR_USER_MASK | PFERR_WRITE_MASK);
725 726
	if (r < 0)
		goto out;
727
	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
728 729 730 731
out:

	return changed;
}
732
EXPORT_SYMBOL_GPL(pdptrs_changed);
733

734
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
735
{
736
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
737
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
738

739 740
	cr0 |= X86_CR0_ET;

741
#ifdef CONFIG_X86_64
742 743
	if (cr0 & 0xffffffff00000000UL)
		return 1;
744 745 746
#endif

	cr0 &= ~CR0_RESERVED_BITS;
747

748 749
	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;
750

751 752
	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;
753 754 755

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
756
		if ((vcpu->arch.efer & EFER_LME)) {
757 758
			int cs_db, cs_l;

759 760
			if (!is_pae(vcpu))
				return 1;
761
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
762 763
			if (cs_l)
				return 1;
764 765
		} else
#endif
766
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
767
						 kvm_read_cr3(vcpu)))
768
			return 1;
769 770
	}

771 772 773
	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

774 775
	kvm_x86_ops->set_cr0(vcpu, cr0);

776
	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
777
		kvm_clear_async_pf_completion_queue(vcpu);
778 779
		kvm_async_pf_hash_reset(vcpu);
	}
780

781 782
	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);
783

784 785 786
	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
787 788
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);

789 790
	return 0;
}
791
EXPORT_SYMBOL_GPL(kvm_set_cr0);
792

793
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
794
{
795
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
796
}
797
EXPORT_SYMBOL_GPL(kvm_lmsw);
798

799 800 801 802 803
static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
			!vcpu->guest_xcr0_loaded) {
		/* kvm_set_xcr() also depends on this */
804 805
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
806 807 808 809 810 811 812 813 814 815 816 817 818
		vcpu->guest_xcr0_loaded = 1;
	}
}

static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}

819
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
820
{
821 822
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
823
	u64 valid_bits;
824 825 826 827

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
D
Dave Hansen 已提交
828
	if (!(xcr0 & XFEATURE_MASK_FP))
829
		return 1;
D
Dave Hansen 已提交
830
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
831
		return 1;
832 833 834 835 836 837

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see fx_init).
	 */
D
Dave Hansen 已提交
838
	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
839
	if (xcr0 & ~valid_bits)
840
		return 1;
841

D
Dave Hansen 已提交
842 843
	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
844 845
		return 1;

D
Dave Hansen 已提交
846 847
	if (xcr0 & XFEATURE_MASK_AVX512) {
		if (!(xcr0 & XFEATURE_MASK_YMM))
848
			return 1;
D
Dave Hansen 已提交
849
		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
850 851
			return 1;
	}
852
	vcpu->arch.xcr0 = xcr0;
853

D
Dave Hansen 已提交
854
	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
855
		kvm_update_cpuid(vcpu);
856 857 858 859 860
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
861 862
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, index, xcr)) {
863 864 865 866 867 868 869
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

870
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
871
{
872
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
873
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
874
				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
875

876 877
	if (cr4 & CR4_RESERVED_BITS)
		return 1;
878

879
	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
880 881
		return 1;

882
	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
883 884
		return 1;

885
	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
886 887
		return 1;

888
	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
F
Feng Wu 已提交
889 890
		return 1;

891
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
892 893
		return 1;

894
	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
895 896
		return 1;

P
Paolo Bonzini 已提交
897 898 899
	if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
		return 1;

900
	if (is_long_mode(vcpu)) {
901 902
		if (!(cr4 & X86_CR4_PAE))
			return 1;
903 904
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
905 906
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
907 908
		return 1;

909
	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
910
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
911 912 913 914 915 916 917
			return 1;

		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

918
	if (kvm_x86_ops->set_cr4(vcpu, cr4))
919
		return 1;
920

921 922
	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
923
		kvm_mmu_reset_context(vcpu);
924

925
	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
A
Avi Kivity 已提交
926
		kvm_update_cpuid(vcpu);
927

928 929
	return 0;
}
930
EXPORT_SYMBOL_GPL(kvm_set_cr4);
931

932
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
933
{
934
	bool skip_tlb_flush = false;
935
#ifdef CONFIG_X86_64
936 937
	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

938
	if (pcid_enabled) {
939 940
		skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
		cr3 &= ~X86_CR3_PCID_NOFLUSH;
941
	}
942
#endif
N
Nadav Amit 已提交
943

944
	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
945 946
		if (!skip_tlb_flush) {
			kvm_mmu_sync_roots(vcpu);
947
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
948
		}
949
		return 0;
950 951
	}

952
	if (is_long_mode(vcpu) &&
953
	    (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
954 955
		return 1;
	else if (is_pae(vcpu) && is_paging(vcpu) &&
956
		   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
N
Nadav Amit 已提交
957
		return 1;
958

959
	kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush);
960
	vcpu->arch.cr3 = cr3;
961
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
962

963 964
	return 0;
}
965
EXPORT_SYMBOL_GPL(kvm_set_cr3);
966

A
Andre Przywara 已提交
967
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
968
{
969 970
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
971
	if (lapic_in_kernel(vcpu))
972 973
		kvm_lapic_set_tpr(vcpu, cr8);
	else
974
		vcpu->arch.cr8 = cr8;
975 976
	return 0;
}
977
EXPORT_SYMBOL_GPL(kvm_set_cr8);
978

979
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
980
{
981
	if (lapic_in_kernel(vcpu))
982 983
		return kvm_lapic_get_cr8(vcpu);
	else
984
		return vcpu->arch.cr8;
985
}
986
EXPORT_SYMBOL_GPL(kvm_get_cr8);
987

988 989 990 991 992 993 994 995 996 997 998
static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
	}
}

J
Jan Kiszka 已提交
999 1000 1001 1002 1003 1004
static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
}

1005 1006 1007 1008 1009 1010 1011 1012 1013
static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	kvm_x86_ops->set_dr7(vcpu, dr7);
1014 1015 1016
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
1017 1018
}

1019 1020 1021 1022
static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
	u64 fixed = DR6_FIXED_1;

1023
	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
1024 1025 1026 1027
		fixed |= DR6_RTM;
	return fixed;
}

1028
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
1029 1030 1031 1032 1033 1034 1035 1036 1037 1038
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		/* fall through */
	case 6:
1039 1040
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
1041
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
J
Jan Kiszka 已提交
1042
		kvm_update_dr6(vcpu);
1043 1044 1045 1046
		break;
	case 5:
		/* fall through */
	default: /* 7 */
1047 1048
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
1049
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
1050
		kvm_update_dr7(vcpu);
1051 1052 1053 1054 1055
		break;
	}

	return 0;
}
1056 1057 1058

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
1059
	if (__kvm_set_dr(vcpu, dr, val)) {
1060
		kvm_inject_gp(vcpu, 0);
1061 1062 1063
		return 1;
	}
	return 0;
1064
}
1065 1066
EXPORT_SYMBOL_GPL(kvm_set_dr);

1067
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
1068 1069 1070 1071 1072 1073 1074 1075
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		/* fall through */
	case 6:
J
Jan Kiszka 已提交
1076 1077 1078 1079
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			*val = vcpu->arch.dr6;
		else
			*val = kvm_x86_ops->get_dr6(vcpu);
1080 1081 1082 1083 1084 1085 1086
		break;
	case 5:
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}
1087 1088
	return 0;
}
1089 1090
EXPORT_SYMBOL_GPL(kvm_get_dr);

A
Avi Kivity 已提交
1091 1092 1093 1094 1095 1096
bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	u64 data;
	int err;

1097
	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
A
Avi Kivity 已提交
1098 1099 1100 1101 1102 1103 1104 1105
	if (err)
		return err;
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
	return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);

1106 1107 1108 1109 1110
/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
1111
 * capabilities of the host cpu. This capabilities test skips MSRs that are
1112 1113
 * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
 * may depend on host virtualization features rather than host cpu features.
1114
 */
1115

1116 1117
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
B
Brian Gerst 已提交
1118
	MSR_STAR,
1119 1120 1121
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
1122
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
1123
	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
1124
	MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES
1125 1126 1127 1128
};

static unsigned num_msrs_to_save;

1129 1130 1131 1132 1133
static u32 emulated_msrs[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
1134
	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
1135 1136
	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
1137
	HV_X64_MSR_RESET,
1138
	HV_X64_MSR_VP_INDEX,
1139
	HV_X64_MSR_VP_RUNTIME,
1140
	HV_X64_MSR_SCONTROL,
A
Andrey Smetanin 已提交
1141
	HV_X64_MSR_STIMER0_CONFIG,
1142
	HV_X64_MSR_VP_ASSIST_PAGE,
1143 1144 1145 1146
	HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
	HV_X64_MSR_TSC_EMULATION_STATUS,

	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
1147 1148
	MSR_KVM_PV_EOI_EN,

W
Will Auld 已提交
1149
	MSR_IA32_TSC_ADJUST,
1150
	MSR_IA32_TSCDEADLINE,
1151
	MSR_IA32_MISC_ENABLE,
1152 1153
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
1154
	MSR_IA32_MCG_EXT_CTL,
P
Paolo Bonzini 已提交
1155
	MSR_IA32_SMBASE,
1156
	MSR_SMI_COUNT,
K
Kyle Huey 已提交
1157 1158
	MSR_PLATFORM_INFO,
	MSR_MISC_FEATURES_ENABLES,
1159
	MSR_AMD64_VIRT_SPEC_CTRL,
1160 1161
};

1162 1163
static unsigned num_emulated_msrs;

1164 1165 1166 1167 1168
/*
 * List of msr numbers which are used to expose MSR-based features that
 * can be used by a hypervisor to validate requested CPU features.
 */
static u32 msr_based_features[] = {
1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187
	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR0_FIXED1,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_CR4_FIXED1,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

1188
	MSR_F10H_DECFG,
1189
	MSR_IA32_UCODE_REV,
1190
	MSR_IA32_ARCH_CAPABILITIES,
1191 1192 1193 1194
};

static unsigned int num_msr_based_features;

1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216
u64 kvm_get_arch_capabilities(void)
{
	u64 data;

	rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);

	/*
	 * If we're doing cache flushes (either "always" or "cond")
	 * we will do one whenever the guest does a vmlaunch/vmresume.
	 * If an outer hypervisor is doing the cache flush for us
	 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
	 * capability to the guest too, and if EPT is disabled we're not
	 * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
	 * require a nested hypervisor to do a flush of its own.
	 */
	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;

	return data;
}
EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);

1217 1218 1219
static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
1220
	case MSR_IA32_ARCH_CAPABILITIES:
1221 1222 1223
		msr->data = kvm_get_arch_capabilities();
		break;
	case MSR_IA32_UCODE_REV:
1224
		rdmsrl_safe(msr->index, &msr->data);
1225
		break;
1226 1227 1228 1229 1230 1231 1232
	default:
		if (kvm_x86_ops->get_msr_feature(msr))
			return 1;
	}
	return 0;
}

1233 1234 1235
static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct kvm_msr_entry msr;
1236
	int r;
1237 1238

	msr.index = index;
1239 1240 1241
	r = kvm_get_msr_feature(&msr);
	if (r)
		return r;
1242 1243 1244 1245 1246 1247

	*data = msr.data;

	return 0;
}

1248
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1249
{
1250
	if (efer & efer_reserved_bits)
1251
		return false;
1252

1253
	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
1254
			return false;
A
Alexander Graf 已提交
1255

1256
	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
1257
			return false;
1258

1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273
	return true;
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (!kvm_valid_efer(vcpu, efer))
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

1274
	efer &= ~EFER_LMA;
1275
	efer |= vcpu->arch.efer & EFER_LMA;
1276

1277 1278
	kvm_x86_ops->set_efer(vcpu, efer);

1279 1280 1281 1282
	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

1283
	return 0;
1284 1285
}

1286 1287 1288 1289 1290 1291
void kvm_enable_efer_bits(u64 mask)
{
       efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

1292 1293 1294 1295 1296
/*
 * Writes msr value into into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
1297
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1298
{
1299 1300 1301 1302 1303 1304
	switch (msr->index) {
	case MSR_FS_BASE:
	case MSR_GS_BASE:
	case MSR_KERNEL_GS_BASE:
	case MSR_CSTAR:
	case MSR_LSTAR:
1305
		if (is_noncanonical_address(msr->data, vcpu))
1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321
			return 1;
		break;
	case MSR_IA32_SYSENTER_EIP:
	case MSR_IA32_SYSENTER_ESP:
		/*
		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
		 * non-canonical address is written on Intel but not on
		 * AMD (which ignores the top 32-bits, because it does
		 * not implement 64-bit SYSENTER).
		 *
		 * 64-bit code should hence be able to write a non-canonical
		 * value on AMD.  Making the address canonical ensures that
		 * vmentry does not fail on Intel after writing a non-canonical
		 * value, and that something deterministic happens if the guest
		 * invokes 64-bit SYSENTER.
		 */
1322
		msr->data = get_canonical(msr->data, vcpu_virt_addr_bits(vcpu));
1323
	}
1324
	return kvm_x86_ops->set_msr(vcpu, msr);
1325
}
1326
EXPORT_SYMBOL_GPL(kvm_set_msr);
1327

1328 1329 1330
/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;
	int r;

	msr.index = index;
	msr.host_initiated = true;
	r = kvm_get_msr(vcpu, &msr);
	if (r)
		return r;

	*data = msr.data;
	return 0;
}

1346 1347
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
1348 1349 1350 1351 1352 1353
	struct msr_data msr;

	msr.data = *data;
	msr.index = index;
	msr.host_initiated = true;
	return kvm_set_msr(vcpu, &msr);
1354 1355
}

1356 1357 1358 1359 1360 1361
#ifdef CONFIG_X86_64
struct pvclock_gtod_data {
	seqcount_t	seq;

	struct { /* extract of a clocksource struct */
		int vclock_mode;
1362 1363
		u64	cycle_last;
		u64	mask;
1364 1365 1366 1367
		u32	mult;
		u32	shift;
	} clock;

1368 1369
	u64		boot_ns;
	u64		nsec_base;
1370
	u64		wall_time_sec;
1371 1372 1373 1374 1375 1376 1377
};

static struct pvclock_gtod_data pvclock_gtod_data;

static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
1378 1379
	u64 boot_ns;

1380
	boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
1381 1382 1383 1384

	write_seqcount_begin(&vdata->seq);

	/* copy pvclock gtod data */
1385 1386 1387 1388 1389
	vdata->clock.vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
	vdata->clock.mask		= tk->tkr_mono.mask;
	vdata->clock.mult		= tk->tkr_mono.mult;
	vdata->clock.shift		= tk->tkr_mono.shift;
1390

1391
	vdata->boot_ns			= boot_ns;
1392
	vdata->nsec_base		= tk->tkr_mono.xtime_nsec;
1393

1394 1395
	vdata->wall_time_sec            = tk->xtime_sec;

1396 1397 1398 1399
	write_seqcount_end(&vdata->seq);
}
#endif

1400 1401 1402 1403 1404 1405 1406 1407 1408
void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
{
	/*
	 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
	 * vcpu_enter_guest.  This function is only called from
	 * the physical CPU that is running vcpu.
	 */
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
}
1409

1410 1411
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
1412 1413
	int version;
	int r;
1414
	struct pvclock_wall_clock wc;
A
Arnd Bergmann 已提交
1415
	struct timespec64 boot;
1416 1417 1418 1419

	if (!wall_clock)
		return;

1420 1421 1422 1423 1424 1425 1426 1427
	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;
1428

1429 1430
	if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
		return;
1431

1432 1433
	/*
	 * The guest calculates current wall clock time by adding
Z
Zachary Amsden 已提交
1434
	 * system time (updated by kvm_guest_time_update below) to the
1435 1436 1437
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
A
Arnd Bergmann 已提交
1438
	getboottime64(&boot);
1439

1440
	if (kvm->arch.kvmclock_offset) {
A
Arnd Bergmann 已提交
1441 1442
		struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
		boot = timespec64_sub(boot, ts);
1443
	}
A
Arnd Bergmann 已提交
1444
	wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
1445 1446
	wc.nsec = boot.tv_nsec;
	wc.version = version;
1447 1448 1449 1450 1451 1452 1453

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

1454 1455
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
1456 1457
	do_shl32_div32(dividend, divisor);
	return dividend;
1458 1459
}

1460
static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
1461
			       s8 *pshift, u32 *pmultiplier)
1462
{
1463
	uint64_t scaled64;
1464 1465 1466 1467
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

1468 1469
	tps64 = base_hz;
	scaled64 = scaled_hz;
1470
	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
1471 1472 1473 1474 1475
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
1476 1477
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
1478 1479 1480
			scaled64 >>= 1;
		else
			tps32 <<= 1;
1481 1482 1483
		shift++;
	}

1484 1485
	*pshift = shift;
	*pmultiplier = div_frac(scaled64, tps32);
1486

1487 1488
	pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n",
		 __func__, base_hz, scaled_hz, shift, *pmultiplier);
1489 1490
}

1491
#ifdef CONFIG_X86_64
1492
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
1493
#endif
1494

1495
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
1496
static unsigned long max_tsc_khz;
1497

1498
static u32 adjust_tsc_khz(u32 khz, s32 ppm)
1499
{
1500 1501 1502
	u64 v = (u64)khz * (1000000 + ppm);
	do_div(v, 1000000);
	return v;
1503 1504
}

1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540
static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	u64 ratio;

	/* Guest TSC same frequency as host TSC? */
	if (!scale) {
		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
		return 0;
	}

	/* TSC scaling supported? */
	if (!kvm_has_tsc_control) {
		if (user_tsc_khz > tsc_khz) {
			vcpu->arch.tsc_catchup = 1;
			vcpu->arch.tsc_always_catchup = 1;
			return 0;
		} else {
			WARN(1, "user requested TSC rate below hardware speed\n");
			return -1;
		}
	}

	/* TSC scaling required  - calculate ratio */
	ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
				user_tsc_khz, tsc_khz);

	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
		WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
			  user_tsc_khz);
		return -1;
	}

	vcpu->arch.tsc_scaling_ratio = ratio;
	return 0;
}

1541
static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
1542
{
1543 1544
	u32 thresh_lo, thresh_hi;
	int use_scaling = 0;
1545

1546
	/* tsc_khz can be zero if TSC calibration fails */
1547
	if (user_tsc_khz == 0) {
1548 1549
		/* set tsc_scaling_ratio to a safe value */
		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
1550
		return -1;
1551
	}
1552

Z
Zachary Amsden 已提交
1553
	/* Compute a scale to convert nanoseconds in TSC cycles */
1554
	kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
1555 1556
			   &vcpu->arch.virtual_tsc_shift,
			   &vcpu->arch.virtual_tsc_mult);
1557
	vcpu->arch.virtual_tsc_khz = user_tsc_khz;
1558 1559 1560 1561 1562 1563 1564 1565 1566

	/*
	 * Compute the variation in TSC rate which is acceptable
	 * within the range of tolerance and decide if the
	 * rate being applied is within that bounds of the hardware
	 * rate.  If so, no scaling or compensation need be done.
	 */
	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
1567 1568
	if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
1569 1570
		use_scaling = 1;
	}
1571
	return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
Z
Zachary Amsden 已提交
1572 1573 1574 1575
}

static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
1576
	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
1577 1578
				      vcpu->arch.virtual_tsc_mult,
				      vcpu->arch.virtual_tsc_shift);
1579
	tsc += vcpu->arch.this_tsc_write;
Z
Zachary Amsden 已提交
1580 1581 1582
	return tsc;
}

1583 1584 1585 1586 1587
static inline int gtod_is_based_on_tsc(int mode)
{
	return mode == VCLOCK_TSC || mode == VCLOCK_HVCLOCK;
}

1588
static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
1589 1590 1591 1592 1593 1594 1595 1596 1597
{
#ifdef CONFIG_X86_64
	bool vcpus_matched;
	struct kvm_arch *ka = &vcpu->kvm->arch;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			 atomic_read(&vcpu->kvm->online_vcpus));

1598 1599 1600 1601 1602 1603 1604 1605 1606
	/*
	 * Once the masterclock is enabled, always perform request in
	 * order to update it.
	 *
	 * In order to enable masterclock, the host clocksource must be TSC
	 * and the vcpus need to have matched TSCs.  When that happens,
	 * perform request to enable masterclock.
	 */
	if (ka->use_master_clock ||
1607
	    (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
1608 1609 1610 1611 1612 1613 1614 1615
		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
			    atomic_read(&vcpu->kvm->online_vcpus),
		            ka->use_master_clock, gtod->clock.vclock_mode);
#endif
}

W
Will Auld 已提交
1616 1617
static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
1618
	u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
W
Will Auld 已提交
1619 1620 1621
	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}

1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648
/*
 * Multiply tsc by a fixed point number represented by ratio.
 *
 * The most significant 64-N bits (mult) of ratio represent the
 * integral part of the fixed point number; the remaining N bits
 * (frac) represent the fractional part, ie. ratio represents a fixed
 * point number (mult + frac * 2^(-N)).
 *
 * N equals to kvm_tsc_scaling_ratio_frac_bits.
 */
static inline u64 __scale_tsc(u64 ratio, u64 tsc)
{
	return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
}

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	u64 _tsc = tsc;
	u64 ratio = vcpu->arch.tsc_scaling_ratio;

	if (ratio != kvm_default_tsc_scaling_ratio)
		_tsc = __scale_tsc(ratio, tsc);

	return _tsc;
}
EXPORT_SYMBOL_GPL(kvm_scale_tsc);

1649 1650 1651 1652 1653 1654 1655 1656 1657
static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	u64 tsc;

	tsc = kvm_scale_tsc(vcpu, rdtsc());

	return target_tsc - tsc;
}

1658 1659
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
1660 1661 1662
	u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);

	return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
1663 1664 1665
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);

1666 1667
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
1668
	vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
1669 1670
}

1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683
static inline bool kvm_check_tsc_unstable(void)
{
#ifdef CONFIG_X86_64
	/*
	 * TSC is marked unstable when we're running on Hyper-V,
	 * 'TSC page' clocksource is good.
	 */
	if (pvclock_gtod_data.clock.vclock_mode == VCLOCK_HVCLOCK)
		return false;
#endif
	return check_tsc_unstable();
}

1684
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
1685 1686
{
	struct kvm *kvm = vcpu->kvm;
Z
Zachary Amsden 已提交
1687
	u64 offset, ns, elapsed;
1688
	unsigned long flags;
1689
	bool matched;
T
Tomasz Grabiec 已提交
1690
	bool already_matched;
1691
	u64 data = msr->data;
1692
	bool synchronizing = false;
1693

1694
	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1695
	offset = kvm_compute_tsc_offset(vcpu, data);
1696
	ns = ktime_get_boot_ns();
Z
Zachary Amsden 已提交
1697
	elapsed = ns - kvm->arch.last_tsc_nsec;
1698

1699
	if (vcpu->arch.virtual_tsc_khz) {
1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718
		if (data == 0 && msr->host_initiated) {
			/*
			 * detection of vcpu initialization -- need to sync
			 * with other vCPUs. This particularly helps to keep
			 * kvm_clock stable after CPU hotplug
			 */
			synchronizing = true;
		} else {
			u64 tsc_exp = kvm->arch.last_tsc_write +
						nsec_to_cycles(vcpu, elapsed);
			u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
			/*
			 * Special case: TSC write with a small delta (1 second)
			 * of virtual cycle time against real time is
			 * interpreted as an attempt to synchronize the CPU.
			 */
			synchronizing = data < tsc_exp + tsc_hz &&
					data + tsc_hz > tsc_exp;
		}
1719
	}
Z
Zachary Amsden 已提交
1720 1721

	/*
1722 1723 1724 1725 1726
	 * For a reliable TSC, we can match TSC offsets, and for an unstable
	 * TSC, we add elapsed time in this computation.  We could let the
	 * compensation code attempt to catch up if we fall behind, but
	 * it's better to try to match offsets from the beginning.
         */
1727
	if (synchronizing &&
1728
	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
1729
		if (!kvm_check_tsc_unstable()) {
1730
			offset = kvm->arch.cur_tsc_offset;
Z
Zachary Amsden 已提交
1731 1732
			pr_debug("kvm: matched tsc offset for %llu\n", data);
		} else {
1733
			u64 delta = nsec_to_cycles(vcpu, elapsed);
1734
			data += delta;
1735
			offset = kvm_compute_tsc_offset(vcpu, data);
1736
			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
Z
Zachary Amsden 已提交
1737
		}
1738
		matched = true;
T
Tomasz Grabiec 已提交
1739
		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
1740 1741 1742 1743 1744 1745
	} else {
		/*
		 * We split periods of matched TSC writes into generations.
		 * For each generation, we track the original measured
		 * nanosecond time, offset, and write, so if TSCs are in
		 * sync, we can match exact offset, and if not, we can match
G
Guo Chao 已提交
1746
		 * exact software computation in compute_guest_tsc()
1747 1748 1749 1750 1751 1752 1753
		 *
		 * These values are tracked in kvm->arch.cur_xxx variables.
		 */
		kvm->arch.cur_tsc_generation++;
		kvm->arch.cur_tsc_nsec = ns;
		kvm->arch.cur_tsc_write = data;
		kvm->arch.cur_tsc_offset = offset;
1754
		matched = false;
T
Tomasz Grabiec 已提交
1755
		pr_debug("kvm: new tsc generation %llu, clock %llu\n",
1756
			 kvm->arch.cur_tsc_generation, data);
Z
Zachary Amsden 已提交
1757
	}
1758 1759 1760 1761 1762

	/*
	 * We also track th most recent recorded KHZ, write and time to
	 * allow the matching interval to be extended at each write.
	 */
Z
Zachary Amsden 已提交
1763 1764
	kvm->arch.last_tsc_nsec = ns;
	kvm->arch.last_tsc_write = data;
1765
	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
1766

1767
	vcpu->arch.last_guest_tsc = data;
1768 1769 1770 1771 1772 1773

	/* Keep track of which generation this VCPU has synchronized to */
	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;

1774
	if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
W
Will Auld 已提交
1775
		update_ia32_tsc_adjust_msr(vcpu, offset);
1776

1777
	kvm_vcpu_write_tsc_offset(vcpu, offset);
1778
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1779 1780

	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
T
Tomasz Grabiec 已提交
1781
	if (!matched) {
1782
		kvm->arch.nr_vcpus_matched_tsc = 0;
T
Tomasz Grabiec 已提交
1783 1784 1785
	} else if (!already_matched) {
		kvm->arch.nr_vcpus_matched_tsc++;
	}
1786 1787 1788

	kvm_track_tsc_matching(vcpu);
	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
1789
}
1790

1791 1792
EXPORT_SYMBOL_GPL(kvm_write_tsc);

1793 1794 1795
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
1796 1797
	u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
	kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
1798 1799 1800 1801 1802 1803 1804
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
		WARN_ON(adjustment < 0);
	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
1805
	adjust_tsc_offset_guest(vcpu, adjustment);
1806 1807
}

1808 1809
#ifdef CONFIG_X86_64

1810
static u64 read_tsc(void)
1811
{
1812
	u64 ret = (u64)rdtsc_ordered();
1813
	u64 last = pvclock_gtod_data.clock.cycle_last;
1814 1815 1816 1817 1818 1819

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
1820
	 * predictable (it's just a function of time and the likely is
1821 1822 1823 1824 1825 1826 1827 1828 1829
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

1830
static inline u64 vgettsc(u64 *tsc_timestamp, int *mode)
1831 1832 1833
{
	long v;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858
	u64 tsc_pg_val;

	switch (gtod->clock.vclock_mode) {
	case VCLOCK_HVCLOCK:
		tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
						  tsc_timestamp);
		if (tsc_pg_val != U64_MAX) {
			/* TSC page valid */
			*mode = VCLOCK_HVCLOCK;
			v = (tsc_pg_val - gtod->clock.cycle_last) &
				gtod->clock.mask;
		} else {
			/* TSC page invalid */
			*mode = VCLOCK_NONE;
		}
		break;
	case VCLOCK_TSC:
		*mode = VCLOCK_TSC;
		*tsc_timestamp = read_tsc();
		v = (*tsc_timestamp - gtod->clock.cycle_last) &
			gtod->clock.mask;
		break;
	default:
		*mode = VCLOCK_NONE;
	}
1859

1860 1861
	if (*mode == VCLOCK_NONE)
		*tsc_timestamp = v = 0;
1862 1863 1864 1865

	return v * gtod->clock.mult;
}

1866
static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp)
1867
{
1868
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1869 1870
	unsigned long seq;
	int mode;
1871
	u64 ns;
1872 1873 1874

	do {
		seq = read_seqcount_begin(&gtod->seq);
1875
		ns = gtod->nsec_base;
1876
		ns += vgettsc(tsc_timestamp, &mode);
1877
		ns >>= gtod->clock.shift;
1878
		ns += gtod->boot_ns;
1879
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1880
	*t = ns;
1881 1882 1883 1884

	return mode;
}

1885
static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
1886 1887 1888 1889 1890 1891 1892 1893 1894 1895
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->nsec_base;
1896
		ns += vgettsc(tsc_timestamp, &mode);
1897 1898 1899 1900 1901 1902 1903 1904 1905
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

1906 1907
/* returns true if host is using TSC based clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
1908 1909
{
	/* checked again under seqlock below */
1910
	if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
1911 1912
		return false;

1913 1914
	return gtod_is_based_on_tsc(do_monotonic_boot(kernel_ns,
						      tsc_timestamp));
1915
}
1916

1917
/* returns true if host is using TSC based clocksource */
1918
static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
1919
					   u64 *tsc_timestamp)
1920 1921
{
	/* checked again under seqlock below */
1922
	if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
1923 1924
		return false;

1925
	return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
1926
}
1927 1928 1929 1930
#endif

/*
 *
1931 1932 1933
 * Assuming a stable TSC across physical CPUS, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 * 		VCPU0 on CPU0		|	VCPU1 on CPU1
 *
 * 1.  read timespec0,tsc0
 * 2.					| timespec1 = timespec0 + N
 * 					| tsc1 = tsc0 + M
 * 3. transition to guest		| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
 * 5.				        | ret1 = timespec1 + (rdtsc - tsc1)
 * 				        | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 * 	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller then the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
1966
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
1967 1968 1969 1970 1971 1972 1973 1974
 *
 */

static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	struct kvm_arch *ka = &kvm->arch;
	int vclock_mode;
1975 1976 1977 1978
	bool host_tsc_clocksource, vcpus_matched;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			atomic_read(&kvm->online_vcpus));
1979 1980 1981 1982 1983

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
1984
	host_tsc_clocksource = kvm_get_time_and_clockread(
1985 1986 1987
					&ka->master_kernel_ns,
					&ka->master_cycle_now);

1988
	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
1989
				&& !ka->backwards_tsc_observed
1990
				&& !ka->boot_vcpu_runs_old_kvmclock;
1991

1992 1993 1994 1995
	if (ka->use_master_clock)
		atomic_set(&kvm_guest_has_master_clock, 1);

	vclock_mode = pvclock_gtod_data.clock.vclock_mode;
1996 1997
	trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
					vcpus_matched);
1998 1999 2000
#endif
}

2001 2002 2003 2004 2005
void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}

2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018
static void kvm_gen_update_masterclock(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	int i;
	struct kvm_vcpu *vcpu;
	struct kvm_arch *ka = &kvm->arch;

	spin_lock(&ka->pvclock_gtod_sync_lock);
	kvm_make_mclock_inprogress_request(kvm);
	/* no guest entries from this point */
	pvclock_update_vm_gtod_copy(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
2019
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2020 2021 2022

	/* guest entries allowed */
	kvm_for_each_vcpu(i, vcpu, kvm)
2023
		kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
2024 2025 2026 2027 2028

	spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif
}

2029
u64 get_kvmclock_ns(struct kvm *kvm)
2030 2031
{
	struct kvm_arch *ka = &kvm->arch;
2032
	struct pvclock_vcpu_time_info hv_clock;
2033
	u64 ret;
2034

2035 2036 2037 2038
	spin_lock(&ka->pvclock_gtod_sync_lock);
	if (!ka->use_master_clock) {
		spin_unlock(&ka->pvclock_gtod_sync_lock);
		return ktime_get_boot_ns() + ka->kvmclock_offset;
2039 2040
	}

2041 2042 2043 2044
	hv_clock.tsc_timestamp = ka->master_cycle_now;
	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
	spin_unlock(&ka->pvclock_gtod_sync_lock);

2045 2046 2047
	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
	get_cpu();

2048 2049 2050 2051 2052 2053 2054
	if (__this_cpu_read(cpu_tsc_khz)) {
		kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
				   &hv_clock.tsc_shift,
				   &hv_clock.tsc_to_system_mul);
		ret = __pvclock_read_cycles(&hv_clock, rdtsc());
	} else
		ret = ktime_get_boot_ns() + ka->kvmclock_offset;
2055 2056 2057 2058

	put_cpu();

	return ret;
2059 2060
}

2061 2062 2063 2064 2065
static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct pvclock_vcpu_time_info guest_hv_clock;

2066
	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085
		&guest_hv_clock, sizeof(guest_hv_clock))))
		return;

	/* This VCPU is paused, but it's legal for a guest to read another
	 * VCPU's kvmclock, so we really have to follow the specification where
	 * it says that version is odd if data is being modified, and even after
	 * it is consistent.
	 *
	 * Version field updates must be kept separate.  This is because
	 * kvm_write_guest_cached might use a "rep movs" instruction, and
	 * writes within a string instruction are weakly ordered.  So there
	 * are three writes overall.
	 *
	 * As a small optimization, only write the version field in the first
	 * and third write.  The vcpu->pv_time cache is still valid, because the
	 * version field is the first in the struct.
	 */
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

2086 2087 2088
	if (guest_hv_clock.version & 1)
		++guest_hv_clock.version;  /* first time write, random junk */

2089
	vcpu->hv_clock.version = guest_hv_clock.version + 1;
2090 2091 2092
	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock.version));
2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105

	smp_wmb();

	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
	vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);

	if (vcpu->pvclock_set_guest_stopped_request) {
		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
		vcpu->pvclock_set_guest_stopped_request = false;
	}

	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);

2106 2107 2108
	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock));
2109 2110 2111 2112

	smp_wmb();

	vcpu->hv_clock.version++;
2113 2114 2115
	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock.version));
2116 2117
}

Z
Zachary Amsden 已提交
2118
static int kvm_guest_time_update(struct kvm_vcpu *v)
2119
{
2120
	unsigned long flags, tgt_tsc_khz;
2121
	struct kvm_vcpu_arch *vcpu = &v->arch;
2122
	struct kvm_arch *ka = &v->kvm->arch;
2123
	s64 kernel_ns;
2124
	u64 tsc_timestamp, host_tsc;
2125
	u8 pvclock_flags;
2126 2127 2128 2129
	bool use_master_clock;

	kernel_ns = 0;
	host_tsc = 0;
2130

2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141
	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
	spin_lock(&ka->pvclock_gtod_sync_lock);
	use_master_clock = ka->use_master_clock;
	if (use_master_clock) {
		host_tsc = ka->master_cycle_now;
		kernel_ns = ka->master_kernel_ns;
	}
	spin_unlock(&ka->pvclock_gtod_sync_lock);
2142 2143 2144

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
2145 2146
	tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
	if (unlikely(tgt_tsc_khz == 0)) {
2147 2148 2149 2150
		local_irq_restore(flags);
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
		return 1;
	}
2151
	if (!use_master_clock) {
2152
		host_tsc = rdtsc();
2153
		kernel_ns = ktime_get_boot_ns();
2154 2155
	}

2156
	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
2157

Z
Zachary Amsden 已提交
2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170
	/*
	 * We may have to catch up the TSC to match elapsed wall clock
	 * time for two reasons, even if kvmclock is used.
	 *   1) CPU could have been running below the maximum TSC rate
	 *   2) Broken TSC compensation resets the base at each VCPU
	 *      entry to avoid unknown leaps of TSC even when running
	 *      again on the same CPU.  This may cause apparent elapsed
	 *      time to disappear, and the guest to stand still or run
	 *	very slowly.
	 */
	if (vcpu->tsc_catchup) {
		u64 tsc = compute_guest_tsc(v, kernel_ns);
		if (tsc > tsc_timestamp) {
2171
			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
Z
Zachary Amsden 已提交
2172 2173
			tsc_timestamp = tsc;
		}
2174 2175
	}

2176 2177
	local_irq_restore(flags);

2178
	/* With all the info we got, fill in the values */
2179

2180 2181 2182 2183
	if (kvm_has_tsc_control)
		tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);

	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
2184
		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
2185 2186
				   &vcpu->hv_clock.tsc_shift,
				   &vcpu->hv_clock.tsc_to_system_mul);
2187
		vcpu->hw_tsc_khz = tgt_tsc_khz;
2188 2189
	}

2190
	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
2191
	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
Z
Zachary Amsden 已提交
2192
	vcpu->last_guest_tsc = tsc_timestamp;
2193

2194
	/* If the host uses TSC clocksource, then it is stable */
2195
	pvclock_flags = 0;
2196 2197 2198
	if (use_master_clock)
		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;

2199 2200
	vcpu->hv_clock.flags = pvclock_flags;

P
Paolo Bonzini 已提交
2201 2202 2203 2204
	if (vcpu->pv_time_enabled)
		kvm_setup_pvclock_page(v);
	if (v == kvm_get_vcpu(v->kvm, 0))
		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
2205
	return 0;
2206 2207
}

2208 2209 2210 2211 2212 2213 2214 2215
/*
 * kvmclock updates which are isolated to a given vcpu, such as
 * vcpu->cpu migration, should not allow system_timestamp from
 * the rest of the vcpus to remain static. Otherwise ntp frequency
 * correction applies to one vcpu's system_timestamp but not
 * the others.
 *
 * So in those cases, request a kvmclock update for all vcpus.
2216 2217 2218 2219
 * We need to rate-limit these requests though, as they can
 * considerably slow guests that have a large number of vcpus.
 * The time for a remote vcpu to update its kvmclock is bound
 * by the delay we use to rate-limit the updates.
2220 2221
 */

2222 2223 2224
#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)

static void kvmclock_update_fn(struct work_struct *work)
2225 2226
{
	int i;
2227 2228 2229 2230
	struct delayed_work *dwork = to_delayed_work(work);
	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
					   kvmclock_update_work);
	struct kvm *kvm = container_of(ka, struct kvm, arch);
2231 2232 2233
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
2234
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2235 2236 2237 2238
		kvm_vcpu_kick(vcpu);
	}
}

2239 2240 2241 2242
static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{
	struct kvm *kvm = v->kvm;

2243
	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
2244 2245 2246 2247
	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
					KVMCLOCK_UPDATE_DELAY);
}

2248 2249 2250 2251 2252 2253 2254 2255 2256
#define KVMCLOCK_SYNC_PERIOD (300 * HZ)

static void kvmclock_sync_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
					   kvmclock_sync_work);
	struct kvm *kvm = container_of(ka, struct kvm, arch);

2257 2258 2259
	if (!kvmclock_periodic_sync)
		return;

2260 2261 2262 2263 2264
	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
					KVMCLOCK_SYNC_PERIOD);
}

2265
static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2266
{
H
Huang Ying 已提交
2267 2268
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
2269 2270
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
H
Huang Ying 已提交
2271

2272 2273
	switch (msr) {
	case MSR_IA32_MCG_STATUS:
H
Huang Ying 已提交
2274
		vcpu->arch.mcg_status = data;
2275
		break;
2276
	case MSR_IA32_MCG_CTL:
2277 2278
		if (!(mcg_cap & MCG_CTL_P) &&
		    (data || !msr_info->host_initiated))
H
Huang Ying 已提交
2279 2280
			return 1;
		if (data != 0 && data != ~(u64)0)
2281
			return 1;
H
Huang Ying 已提交
2282 2283 2284 2285
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
2286
		    msr < MSR_IA32_MCx_CTL(bank_num)) {
H
Huang Ying 已提交
2287
			u32 offset = msr - MSR_IA32_MC0_CTL;
2288 2289 2290 2291 2292
			/* only 0 or all 1s can be written to IA32_MCi_CTL
			 * some Linux kernels though clear bit 10 in bank 4 to
			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
			 * this to avoid an uncatched #GP in the guest
			 */
H
Huang Ying 已提交
2293
			if ((offset & 0x3) == 0 &&
2294
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
H
Huang Ying 已提交
2295
				return -1;
2296 2297 2298
			if (!msr_info->host_initiated &&
				(offset & 0x3) == 1 && data != 0)
				return -1;
H
Huang Ying 已提交
2299 2300 2301 2302 2303 2304 2305 2306
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

E
Ed Swierk 已提交
2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323
static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
2324 2325 2326
	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
	if (IS_ERR(page)) {
		r = PTR_ERR(page);
E
Ed Swierk 已提交
2327
		goto out;
2328
	}
2329
	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
E
Ed Swierk 已提交
2330 2331 2332 2333 2334 2335 2336 2337
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

2338 2339 2340 2341
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
	gpa_t gpa = data & ~0x3f;

2342 2343
	/* Bits 3:5 are reserved, Should be zero */
	if (data & 0x38)
2344 2345 2346 2347 2348 2349 2350 2351 2352 2353
		return 1;

	vcpu->arch.apf.msr_val = data;

	if (!(data & KVM_ASYNC_PF_ENABLED)) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
		return 0;
	}

2354
	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
2355
					sizeof(u32)))
2356 2357
		return 1;

2358
	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
2359
	vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
2360 2361 2362 2363
	kvm_async_pf_wakeup_all(vcpu);
	return 0;
}

2364 2365
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
2366
	vcpu->arch.pv_time_enabled = false;
2367 2368
}

2369 2370 2371 2372 2373 2374
static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
}

G
Glauber Costa 已提交
2375 2376 2377 2378 2379
static void record_steal_time(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

2380
	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
G
Glauber Costa 已提交
2381 2382 2383
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
		return;

2384 2385 2386 2387 2388 2389
	/*
	 * Doing a TLB flush here, on the guest's behalf, can avoid
	 * expensive IPIs.
	 */
	if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
		kvm_vcpu_flush_tlb(vcpu, false);
2390

W
Wanpeng Li 已提交
2391 2392 2393 2394 2395
	if (vcpu->arch.st.steal.version & 1)
		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */

	vcpu->arch.st.steal.version += 1;

2396
	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
W
Wanpeng Li 已提交
2397 2398 2399 2400
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));

	smp_wmb();

2401 2402 2403
	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
		vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;
W
Wanpeng Li 已提交
2404

2405
	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
W
Wanpeng Li 已提交
2406 2407 2408 2409 2410
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));

	smp_wmb();

	vcpu->arch.st.steal.version += 1;
G
Glauber Costa 已提交
2411

2412
	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
G
Glauber Costa 已提交
2413 2414 2415
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}

2416
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2417
{
2418
	bool pr = false;
2419 2420
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
2421

2422
	switch (msr) {
2423 2424 2425 2426 2427
	case MSR_AMD64_NB_CFG:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
	case MSR_AMD64_BU_CFG2:
2428
	case MSR_AMD64_DC_CFG:
2429 2430
		break;

2431 2432 2433 2434
	case MSR_IA32_UCODE_REV:
		if (msr_info->host_initiated)
			vcpu->arch.microcode_version = data;
		break;
2435
	case MSR_EFER:
2436
		return set_efer(vcpu, data);
2437 2438
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
2439
		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
2440
		data &= ~(u64)0x8;	/* ignore TLB cache disable */
2441
		data &= ~(u64)0x40000;  /* ignore Mc status write enable */
2442
		if (data != 0) {
2443 2444
			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
				    data);
2445 2446
			return 1;
		}
2447
		break;
2448 2449
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
2450 2451
			vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
				    "0x%llx\n", data);
2452 2453
			return 1;
		}
2454
		break;
2455 2456 2457 2458 2459 2460 2461 2462 2463
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
2464 2465
		vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			    __func__, data);
2466
		break;
A
Avi Kivity 已提交
2467
	case 0x200 ... 0x2ff:
2468
		return kvm_mtrr_set_msr(vcpu, msr, data);
2469
	case MSR_IA32_APICBASE:
2470
		return kvm_set_apic_base(vcpu, msr_info);
G
Gleb Natapov 已提交
2471 2472
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
2473 2474 2475
	case MSR_IA32_TSCDEADLINE:
		kvm_set_lapic_tscdeadline_msr(vcpu, data);
		break;
W
Will Auld 已提交
2476
	case MSR_IA32_TSC_ADJUST:
2477
		if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
W
Will Auld 已提交
2478
			if (!msr_info->host_initiated) {
2479
				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
2480
				adjust_tsc_offset_guest(vcpu, adj);
W
Will Auld 已提交
2481 2482 2483 2484
			}
			vcpu->arch.ia32_tsc_adjust_msr = data;
		}
		break;
2485
	case MSR_IA32_MISC_ENABLE:
2486
		vcpu->arch.ia32_misc_enable_msr = data;
2487
		break;
P
Paolo Bonzini 已提交
2488 2489 2490 2491 2492
	case MSR_IA32_SMBASE:
		if (!msr_info->host_initiated)
			return 1;
		vcpu->arch.smbase = data;
		break;
2493 2494 2495
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, msr_info);
		break;
2496 2497 2498 2499 2500
	case MSR_SMI_COUNT:
		if (!msr_info->host_initiated)
			return 1;
		vcpu->arch.smi_count = data;
		break;
2501
	case MSR_KVM_WALL_CLOCK_NEW:
2502 2503 2504 2505
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
2506
	case MSR_KVM_SYSTEM_TIME_NEW:
2507
	case MSR_KVM_SYSTEM_TIME: {
2508 2509
		struct kvm_arch *ka = &vcpu->kvm->arch;

2510
		kvmclock_reset(vcpu);
2511

2512 2513 2514 2515
		if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
			bool tmp = (msr == MSR_KVM_SYSTEM_TIME);

			if (ka->boot_vcpu_runs_old_kvmclock != tmp)
2516
				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2517 2518 2519 2520

			ka->boot_vcpu_runs_old_kvmclock = tmp;
		}

2521
		vcpu->arch.time = data;
2522
		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2523 2524 2525 2526 2527

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

2528
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2529 2530
		     &vcpu->arch.pv_time, data & ~1ULL,
		     sizeof(struct pvclock_vcpu_time_info)))
2531 2532 2533
			vcpu->arch.pv_time_enabled = false;
		else
			vcpu->arch.pv_time_enabled = true;
2534

2535 2536
		break;
	}
2537 2538 2539 2540
	case MSR_KVM_ASYNC_PF_EN:
		if (kvm_pv_enable_async_pf(vcpu, data))
			return 1;
		break;
G
Glauber Costa 已提交
2541 2542 2543 2544 2545 2546 2547 2548
	case MSR_KVM_STEAL_TIME:

		if (unlikely(!sched_info_on()))
			return 1;

		if (data & KVM_STEAL_RESERVED_MASK)
			return 1;

2549
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
2550 2551
						data & KVM_STEAL_VALID_BITS,
						sizeof(struct kvm_steal_time)))
G
Glauber Costa 已提交
2552 2553 2554 2555 2556 2557 2558 2559 2560 2561
			return 1;

		vcpu->arch.st.msr_val = data;

		if (!(data & KVM_MSR_ENABLED))
			break;

		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

		break;
2562
	case MSR_KVM_PV_EOI_EN:
2563
		if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
2564 2565
			return 1;
		break;
G
Glauber Costa 已提交
2566

H
Huang Ying 已提交
2567 2568
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
2569
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2570
		return set_msr_mce(vcpu, msr_info);
2571

2572 2573 2574 2575 2576
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
		pr = true; /* fall through */
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2577
		if (kvm_pmu_is_valid_msr(vcpu, msr))
2578
			return kvm_pmu_set_msr(vcpu, msr_info);
2579 2580

		if (pr || data != 0)
2581 2582
			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
				    "0x%x data 0x%llx\n", msr, data);
2583
		break;
2584 2585 2586 2587 2588
	case MSR_K7_CLK_CTL:
		/*
		 * Ignore all writes to this no longer documented MSR.
		 * Writes are only relevant for old K7 processors,
		 * all pre-dating SVM, but a recommended workaround from
G
Guo Chao 已提交
2589
		 * AMD for these chips. It is possible to specify the
2590 2591 2592 2593
		 * affected processor models on the command line, hence
		 * the need to ignore the workaround.
		 */
		break;
2594
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2595 2596
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
A
Andrey Smetanin 已提交
2597
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2598 2599 2600
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
2601 2602
		return kvm_hv_set_msr_common(vcpu, msr, data,
					     msr_info->host_initiated);
2603 2604 2605 2606
	case MSR_IA32_BBL_CR_CTL3:
		/* Drop writes to this legacy MSR -- see rdmsr
		 * counterpart for further detail.
		 */
2607 2608 2609
		if (report_ignored_msrs)
			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
				msr, data);
2610
		break;
2611
	case MSR_AMD64_OSVW_ID_LENGTH:
2612
		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2613 2614 2615 2616
			return 1;
		vcpu->arch.osvw.length = data;
		break;
	case MSR_AMD64_OSVW_STATUS:
2617
		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2618 2619 2620
			return 1;
		vcpu->arch.osvw.status = data;
		break;
K
Kyle Huey 已提交
2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634
	case MSR_PLATFORM_INFO:
		if (!msr_info->host_initiated ||
		    (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
		     cpuid_fault_enabled(vcpu)))
			return 1;
		vcpu->arch.msr_platform_info = data;
		break;
	case MSR_MISC_FEATURES_ENABLES:
		if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
		    (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
		     !supports_cpuid_fault(vcpu)))
			return 1;
		vcpu->arch.msr_misc_features_enables = data;
		break;
2635
	default:
E
Ed Swierk 已提交
2636 2637
		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
			return xen_hvm_config(vcpu, data);
2638
		if (kvm_pmu_is_valid_msr(vcpu, msr))
2639
			return kvm_pmu_set_msr(vcpu, msr_info);
2640
		if (!ignore_msrs) {
2641
			vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
2642
				    msr, data);
2643 2644
			return 1;
		} else {
2645 2646 2647 2648
			if (report_ignored_msrs)
				vcpu_unimpl(vcpu,
					"ignored wrmsr: 0x%x data 0x%llx\n",
					msr, data);
2649 2650
			break;
		}
2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
2662
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2663
{
2664
	return kvm_x86_ops->get_msr(vcpu, msr);
2665
}
2666
EXPORT_SYMBOL_GPL(kvm_get_msr);
2667

2668
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
2669 2670
{
	u64 data;
H
Huang Ying 已提交
2671 2672
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
2673 2674 2675 2676

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
H
Huang Ying 已提交
2677 2678
		data = 0;
		break;
2679
	case MSR_IA32_MCG_CAP:
H
Huang Ying 已提交
2680 2681
		data = vcpu->arch.mcg_cap;
		break;
2682
	case MSR_IA32_MCG_CTL:
2683
		if (!(mcg_cap & MCG_CTL_P) && !host)
H
Huang Ying 已提交
2684 2685 2686 2687 2688 2689 2690 2691
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
2692
		    msr < MSR_IA32_MCx_CTL(bank_num)) {
H
Huang Ying 已提交
2693 2694 2695 2696 2697 2698 2699 2700 2701 2702
			u32 offset = msr - MSR_IA32_MC0_CTL;
			data = vcpu->arch.mce_banks[offset];
			break;
		}
		return 1;
	}
	*pdata = data;
	return 0;
}

2703
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
H
Huang Ying 已提交
2704
{
2705
	switch (msr_info->index) {
H
Huang Ying 已提交
2706
	case MSR_IA32_PLATFORM_ID:
2707
	case MSR_IA32_EBL_CR_POWERON:
2708 2709 2710 2711 2712
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
2713
	case MSR_K8_SYSCFG:
2714 2715
	case MSR_K8_TSEG_ADDR:
	case MSR_K8_TSEG_MASK:
2716
	case MSR_K7_HWCR:
2717
	case MSR_VM_HSAVE_PA:
2718
	case MSR_K8_INT_PENDING_MSG:
2719
	case MSR_AMD64_NB_CFG:
2720
	case MSR_FAM10H_MMIO_CONF_BASE:
2721
	case MSR_AMD64_BU_CFG2:
D
Dmitry Bilunov 已提交
2722
	case MSR_IA32_PERF_CTL:
2723
	case MSR_AMD64_DC_CFG:
2724
		msr_info->data = 0;
2725
		break;
2726
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
2727 2728 2729 2730
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2731
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2732 2733
			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
		msr_info->data = 0;
2734
		break;
2735
	case MSR_IA32_UCODE_REV:
2736
		msr_info->data = vcpu->arch.microcode_version;
2737
		break;
2738 2739 2740
	case MSR_IA32_TSC:
		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
		break;
A
Avi Kivity 已提交
2741 2742
	case MSR_MTRRcap:
	case 0x200 ... 0x2ff:
2743
		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
2744
	case 0xcd: /* fsb frequency */
2745
		msr_info->data = 3;
2746
		break;
2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758
		/*
		 * MSR_EBC_FREQUENCY_ID
		 * Conservative value valid for even the basic CPU models.
		 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
		 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
		 * and 266MHz for model 3, or 4. Set Core Clock
		 * Frequency to System Bus Frequency Ratio to 1 (bits
		 * 31:24) even though these are only valid for CPU
		 * models > 2, however guests may end up dividing or
		 * multiplying by zero otherwise.
		 */
	case MSR_EBC_FREQUENCY_ID:
2759
		msr_info->data = 1 << 24;
2760
		break;
2761
	case MSR_IA32_APICBASE:
2762
		msr_info->data = kvm_get_apic_base(vcpu);
2763
		break;
G
Gleb Natapov 已提交
2764
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2765
		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
G
Gleb Natapov 已提交
2766
		break;
2767
	case MSR_IA32_TSCDEADLINE:
2768
		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
2769
		break;
W
Will Auld 已提交
2770
	case MSR_IA32_TSC_ADJUST:
2771
		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
W
Will Auld 已提交
2772
		break;
2773
	case MSR_IA32_MISC_ENABLE:
2774
		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
2775
		break;
P
Paolo Bonzini 已提交
2776 2777 2778 2779
	case MSR_IA32_SMBASE:
		if (!msr_info->host_initiated)
			return 1;
		msr_info->data = vcpu->arch.smbase;
2780
		break;
2781 2782 2783
	case MSR_SMI_COUNT:
		msr_info->data = vcpu->arch.smi_count;
		break;
2784 2785
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
2786
		msr_info->data = 1000ULL;
2787
		/* CPU multiplier */
2788
		msr_info->data |= (((uint64_t)4ULL) << 40);
2789
		break;
2790
	case MSR_EFER:
2791
		msr_info->data = vcpu->arch.efer;
2792
		break;
2793
	case MSR_KVM_WALL_CLOCK:
2794
	case MSR_KVM_WALL_CLOCK_NEW:
2795
		msr_info->data = vcpu->kvm->arch.wall_clock;
2796 2797
		break;
	case MSR_KVM_SYSTEM_TIME:
2798
	case MSR_KVM_SYSTEM_TIME_NEW:
2799
		msr_info->data = vcpu->arch.time;
2800
		break;
2801
	case MSR_KVM_ASYNC_PF_EN:
2802
		msr_info->data = vcpu->arch.apf.msr_val;
2803
		break;
G
Glauber Costa 已提交
2804
	case MSR_KVM_STEAL_TIME:
2805
		msr_info->data = vcpu->arch.st.msr_val;
G
Glauber Costa 已提交
2806
		break;
2807
	case MSR_KVM_PV_EOI_EN:
2808
		msr_info->data = vcpu->arch.pv_eoi.msr_val;
2809
		break;
H
Huang Ying 已提交
2810 2811 2812 2813 2814
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
2815
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2816 2817
		return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
				   msr_info->host_initiated);
2818 2819 2820 2821 2822 2823 2824 2825 2826 2827
	case MSR_K7_CLK_CTL:
		/*
		 * Provide expected ramp-up count for K7. All other
		 * are set to zero, indicating minimum divisors for
		 * every field.
		 *
		 * This prevents guest kernels on AMD host with CPU
		 * type 6, model 8 and higher from exploding due to
		 * the rdmsr failing.
		 */
2828
		msr_info->data = 0x20000000;
2829
		break;
2830
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2831 2832
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
A
Andrey Smetanin 已提交
2833
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2834 2835 2836
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
2837
		return kvm_hv_get_msr_common(vcpu,
2838 2839
					     msr_info->index, &msr_info->data,
					     msr_info->host_initiated);
2840
		break;
2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851
	case MSR_IA32_BBL_CR_CTL3:
		/* This legacy MSR exists but isn't fully documented in current
		 * silicon.  It is however accessed by winxp in very narrow
		 * scenarios where it sets bit #19, itself documented as
		 * a "reserved" bit.  Best effort attempt to source coherent
		 * read data here should the balance of the register be
		 * interpreted by the guest:
		 *
		 * L2 cache control register 3: 64GB range, 256KB size,
		 * enabled, latency 0x1, configured
		 */
2852
		msr_info->data = 0xbe702111;
2853
		break;
2854
	case MSR_AMD64_OSVW_ID_LENGTH:
2855
		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2856
			return 1;
2857
		msr_info->data = vcpu->arch.osvw.length;
2858 2859
		break;
	case MSR_AMD64_OSVW_STATUS:
2860
		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2861
			return 1;
2862
		msr_info->data = vcpu->arch.osvw.status;
2863
		break;
K
Kyle Huey 已提交
2864
	case MSR_PLATFORM_INFO:
2865 2866 2867
		if (!msr_info->host_initiated &&
		    !vcpu->kvm->arch.guest_can_read_msr_platform_info)
			return 1;
K
Kyle Huey 已提交
2868 2869 2870 2871 2872
		msr_info->data = vcpu->arch.msr_platform_info;
		break;
	case MSR_MISC_FEATURES_ENABLES:
		msr_info->data = vcpu->arch.msr_misc_features_enables;
		break;
2873
	default:
2874
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2875
			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2876
		if (!ignore_msrs) {
2877 2878
			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
					       msr_info->index);
2879 2880
			return 1;
		} else {
2881 2882 2883
			if (report_ignored_msrs)
				vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
					msr_info->index);
2884
			msr_info->data = 0;
2885 2886
		}
		break;
2887 2888 2889 2890 2891
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

2892 2893 2894 2895 2896 2897 2898 2899 2900 2901
/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
2902
	int i;
2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
2927
	if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
2928 2929 2930 2931 2932 2933 2934
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2935 2936 2937
	entries = memdup_user(user_msrs->entries, size);
	if (IS_ERR(entries)) {
		r = PTR_ERR(entries);
2938
		goto out;
2939
	}
2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
2952
	kfree(entries);
2953 2954 2955 2956
out:
	return r;
}

2957 2958 2959
static inline bool kvm_can_mwait_in_guest(void)
{
	return boot_cpu_has(X86_FEATURE_MWAIT) &&
2960 2961
		!boot_cpu_has_bug(X86_BUG_MONITOR) &&
		boot_cpu_has(X86_FEATURE_ARAT);
2962 2963
}

2964
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2965
{
2966
	int r = 0;
2967 2968 2969 2970 2971 2972

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
2973
	case KVM_CAP_EXT_CPUID:
B
Borislav Petkov 已提交
2974
	case KVM_CAP_EXT_EMUL_CPUID:
2975
	case KVM_CAP_CLOCKSOURCE:
S
Sheng Yang 已提交
2976
	case KVM_CAP_PIT:
2977
	case KVM_CAP_NOP_IO_DELAY:
2978
	case KVM_CAP_MP_STATE:
2979
	case KVM_CAP_SYNC_MMU:
2980
	case KVM_CAP_USER_NMI:
2981
	case KVM_CAP_REINJECT_CONTROL:
2982
	case KVM_CAP_IRQ_INJECT_STATUS:
G
Gregory Haskins 已提交
2983
	case KVM_CAP_IOEVENTFD:
2984
	case KVM_CAP_IOEVENTFD_NO_LENGTH:
2985
	case KVM_CAP_PIT2:
B
Beth Kon 已提交
2986
	case KVM_CAP_PIT_STATE2:
2987
	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
E
Ed Swierk 已提交
2988
	case KVM_CAP_XEN_HVM:
J
Jan Kiszka 已提交
2989
	case KVM_CAP_VCPU_EVENTS:
2990
	case KVM_CAP_HYPERV:
G
Gleb Natapov 已提交
2991
	case KVM_CAP_HYPERV_VAPIC:
2992
	case KVM_CAP_HYPERV_SPIN:
2993
	case KVM_CAP_HYPERV_SYNIC:
2994
	case KVM_CAP_HYPERV_SYNIC2:
2995
	case KVM_CAP_HYPERV_VP_INDEX:
2996
	case KVM_CAP_HYPERV_EVENTFD:
2997
	case KVM_CAP_HYPERV_TLBFLUSH:
2998
	case KVM_CAP_HYPERV_SEND_IPI:
2999
	case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
3000
	case KVM_CAP_HYPERV_CPUID:
3001
	case KVM_CAP_PCI_SEGMENT:
3002
	case KVM_CAP_DEBUGREGS:
3003
	case KVM_CAP_X86_ROBUST_SINGLESTEP:
3004
	case KVM_CAP_XSAVE:
3005
	case KVM_CAP_ASYNC_PF:
3006
	case KVM_CAP_GET_TSC_KHZ:
3007
	case KVM_CAP_KVMCLOCK_CTRL:
X
Xiao Guangrong 已提交
3008
	case KVM_CAP_READONLY_MEM:
3009
	case KVM_CAP_HYPERV_TIME:
3010
	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
3011
	case KVM_CAP_TSC_DEADLINE_TIMER:
3012
	case KVM_CAP_DISABLE_QUIRKS:
3013
	case KVM_CAP_SET_BOOT_CPU_ID:
3014
 	case KVM_CAP_SPLIT_IRQCHIP:
3015
	case KVM_CAP_IMMEDIATE_EXIT:
3016
	case KVM_CAP_GET_MSR_FEATURES:
3017
	case KVM_CAP_MSR_PLATFORM_INFO:
3018
	case KVM_CAP_EXCEPTION_PAYLOAD:
3019 3020
		r = 1;
		break;
K
Ken Hofsass 已提交
3021 3022 3023
	case KVM_CAP_SYNC_REGS:
		r = KVM_SYNC_X86_VALID_FIELDS;
		break;
3024 3025 3026
	case KVM_CAP_ADJUST_CLOCK:
		r = KVM_CLOCK_TSC_STABLE;
		break;
3027
	case KVM_CAP_X86_DISABLE_EXITS:
M
Michael S. Tsirkin 已提交
3028
		r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE;
3029 3030
		if(kvm_can_mwait_in_guest())
			r |= KVM_X86_DISABLE_EXITS_MWAIT;
3031
		break;
3032 3033 3034 3035 3036 3037 3038 3039 3040
	case KVM_CAP_X86_SMM:
		/* SMBASE is usually relocated above 1M on modern chipsets,
		 * and SMM handlers might indeed rely on 4G segment limits,
		 * so do not report SMM to be available if real mode is
		 * emulated via vm86 mode.  Still, do not go to great lengths
		 * to avoid userspace's usage of the feature, because it is a
		 * fringe case that is not enabled except via specific settings
		 * of the module parameters.
		 */
3041
		r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
3042
		break;
3043 3044 3045
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
3046
	case KVM_CAP_NR_VCPUS:
3047 3048 3049
		r = KVM_SOFT_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPUS:
3050 3051
		r = KVM_MAX_VCPUS;
		break;
3052
	case KVM_CAP_NR_MEMSLOTS:
3053
		r = KVM_USER_MEM_SLOTS;
3054
		break;
3055 3056
	case KVM_CAP_PV_MMU:	/* obsolete */
		r = 0;
3057
		break;
H
Huang Ying 已提交
3058 3059 3060
	case KVM_CAP_MCE:
		r = KVM_MAX_MCE_BANKS;
		break;
3061
	case KVM_CAP_XCRS:
3062
		r = boot_cpu_has(X86_FEATURE_XSAVE);
3063
		break;
3064 3065 3066
	case KVM_CAP_TSC_CONTROL:
		r = kvm_has_tsc_control;
		break;
3067 3068 3069
	case KVM_CAP_X2APIC_API:
		r = KVM_X2APIC_API_VALID_FLAGS;
		break;
3070 3071 3072 3073
	case KVM_CAP_NESTED_STATE:
		r = kvm_x86_ops->get_nested_state ?
			kvm_x86_ops->get_nested_state(NULL, 0, 0) : 0;
		break;
3074 3075 3076 3077 3078 3079 3080
	default:
		break;
	}
	return r;

}

3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
3094
		if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
3095 3096
			goto out;
		n = msr_list.nmsrs;
3097
		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
3098
		if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
3099 3100
			goto out;
		r = -E2BIG;
J
Jan Kiszka 已提交
3101
		if (n < msr_list.nmsrs)
3102 3103 3104 3105 3106
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
J
Jan Kiszka 已提交
3107
		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
3108
				 &emulated_msrs,
3109
				 num_emulated_msrs * sizeof(u32)))
3110 3111 3112 3113
			goto out;
		r = 0;
		break;
	}
B
Borislav Petkov 已提交
3114 3115
	case KVM_GET_SUPPORTED_CPUID:
	case KVM_GET_EMULATED_CPUID: {
3116 3117 3118 3119
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
3120
		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
3121
			goto out;
B
Borislav Petkov 已提交
3122 3123 3124

		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
					    ioctl);
3125 3126 3127 3128
		if (r)
			goto out;

		r = -EFAULT;
3129
		if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
3130 3131 3132 3133
			goto out;
		r = 0;
		break;
	}
H
Huang Ying 已提交
3134 3135
	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
		r = -EFAULT;
3136 3137
		if (copy_to_user(argp, &kvm_mce_cap_supported,
				 sizeof(kvm_mce_cap_supported)))
H
Huang Ying 已提交
3138 3139 3140
			goto out;
		r = 0;
		break;
3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165
	case KVM_GET_MSR_FEATURE_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned int n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msr_based_features;
		if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
			goto out;
		r = -E2BIG;
		if (n < msr_list.nmsrs)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msr_based_features,
				 num_msr_based_features * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(NULL, argp, do_get_msr_feature, 1);
		break;
H
Huang Ying 已提交
3166
	}
3167 3168 3169 3170 3171 3172 3173
	default:
		r = -EINVAL;
	}
out:
	return r;
}

3174 3175 3176 3177 3178 3179 3180
static void wbinvd_ipi(void *garbage)
{
	wbinvd();
}

static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
3181
	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
3182 3183
}

3184 3185
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
3186 3187 3188 3189 3190 3191 3192 3193 3194
	/* Address WBINVD may be executed by guest */
	if (need_emulate_wbinvd(vcpu)) {
		if (kvm_x86_ops->has_wbinvd_exit())
			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
			smp_call_function_single(vcpu->cpu,
					wbinvd_ipi, NULL, 1);
	}

3195
	kvm_x86_ops->vcpu_load(vcpu, cpu);
3196

3197 3198 3199 3200
	/* Apply any externally detected TSC adjustments (due to suspend) */
	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
		vcpu->arch.tsc_offset_adjustment = 0;
3201
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3202
	}
3203

3204
	if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
3205
		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
3206
				rdtsc() - vcpu->arch.last_host_tsc;
Z
Zachary Amsden 已提交
3207 3208
		if (tsc_delta < 0)
			mark_tsc_unstable("KVM discovered backwards TSC");
3209

3210
		if (kvm_check_tsc_unstable()) {
3211
			u64 offset = kvm_compute_tsc_offset(vcpu,
3212
						vcpu->arch.last_guest_tsc);
3213
			kvm_vcpu_write_tsc_offset(vcpu, offset);
Z
Zachary Amsden 已提交
3214 3215
			vcpu->arch.tsc_catchup = 1;
		}
3216 3217 3218 3219

		if (kvm_lapic_hv_timer_in_use(vcpu))
			kvm_lapic_restart_hv_timer(vcpu);

3220 3221 3222 3223 3224
		/*
		 * On a host with synchronized TSC, there is no need to update
		 * kvmclock on vcpu->cpu migration
		 */
		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
3225
			kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
Z
Zachary Amsden 已提交
3226
		if (vcpu->cpu != cpu)
3227
			kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
Z
Zachary Amsden 已提交
3228
		vcpu->cpu = cpu;
Z
Zachary Amsden 已提交
3229
	}
G
Glauber Costa 已提交
3230 3231

	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
3232 3233
}

3234 3235 3236 3237 3238
static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

W
Wanpeng Li 已提交
3239
	vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
3240

3241
	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
3242 3243 3244 3245 3246
			&vcpu->arch.st.steal.preempted,
			offsetof(struct kvm_steal_time, preempted),
			sizeof(vcpu->arch.st.steal.preempted));
}

3247 3248
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
3249
	int idx;
3250 3251 3252 3253

	if (vcpu->preempted)
		vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);

3254 3255 3256 3257 3258 3259 3260 3261 3262
	/*
	 * Disable page faults because we're in atomic context here.
	 * kvm_write_guest_offset_cached() would call might_fault()
	 * that relies on pagefault_disable() to tell if there's a
	 * bug. NOTE: the write to guest memory may not go through if
	 * during postcopy live migration or if there's heavy guest
	 * paging.
	 */
	pagefault_disable();
3263 3264 3265 3266 3267
	/*
	 * kvm_memslots() will be called by
	 * kvm_write_guest_offset_cached() so take the srcu lock.
	 */
	idx = srcu_read_lock(&vcpu->kvm->srcu);
3268
	kvm_steal_time_set_preempted(vcpu);
3269
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3270
	pagefault_enable();
3271
	kvm_x86_ops->vcpu_put(vcpu);
3272
	vcpu->arch.last_host_tsc = rdtsc();
3273
	/*
3274 3275 3276
	 * If userspace has set any breakpoints or watchpoints, dr6 is restored
	 * on every vmexit, but if not, we might have a stale dr6 from the
	 * guest. do_debug expects dr6 to be cleared after it runs, do the same.
3277
	 */
3278
	set_debugreg(0, 6);
3279 3280 3281 3282 3283
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
3284
	if (vcpu->arch.apicv_active)
3285 3286
		kvm_x86_ops->sync_pir_to_irr(vcpu);

3287
	return kvm_apic_get_state(vcpu, s);
3288 3289 3290 3291 3292
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
3293 3294 3295 3296 3297
	int r;

	r = kvm_apic_set_state(vcpu, s);
	if (r)
		return r;
3298
	update_cr8_intercept(vcpu);
3299 3300 3301 3302

	return 0;
}

3303 3304 3305 3306 3307 3308
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{
	return (!lapic_in_kernel(vcpu) ||
		kvm_apic_accept_pic_intr(vcpu));
}

3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322
/*
 * if userspace requested an interrupt window, check that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
	return kvm_arch_interrupt_allowed(vcpu) &&
		!kvm_cpu_has_interrupt(vcpu) &&
		!kvm_event_needs_reinjection(vcpu) &&
		kvm_cpu_accept_dm_intr(vcpu);
}

3323 3324 3325
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
3326
	if (irq->irq >= KVM_NR_INTERRUPTS)
3327
		return -EINVAL;
3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339

	if (!irqchip_in_kernel(vcpu->kvm)) {
		kvm_queue_interrupt(vcpu, irq->irq, false);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		return 0;
	}

	/*
	 * With in-kernel LAPIC, we only use this to inject EXTINT, so
	 * fail for in-kernel 8259.
	 */
	if (pic_in_kernel(vcpu->kvm))
3340 3341
		return -ENXIO;

3342 3343
	if (vcpu->arch.pending_external_vector != -1)
		return -EEXIST;
3344

3345
	vcpu->arch.pending_external_vector = irq->irq;
3346
	kvm_make_request(KVM_REQ_EVENT, vcpu);
3347 3348 3349
	return 0;
}

3350 3351 3352 3353 3354 3355 3356
static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	kvm_inject_nmi(vcpu);

	return 0;
}

3357 3358
static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
{
P
Paolo Bonzini 已提交
3359 3360
	kvm_make_request(KVM_REQ_SMI, vcpu);

3361 3362 3363
	return 0;
}

3364 3365 3366 3367 3368 3369 3370 3371 3372
static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

H
Huang Ying 已提交
3373 3374 3375 3376 3377 3378 3379
static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
					u64 mcg_cap)
{
	int r;
	unsigned bank_num = mcg_cap & 0xff, bank;

	r = -EINVAL;
3380
	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
H
Huang Ying 已提交
3381
		goto out;
3382
	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
H
Huang Ying 已提交
3383 3384 3385 3386 3387 3388 3389 3390 3391
		goto out;
	r = 0;
	vcpu->arch.mcg_cap = mcg_cap;
	/* Init IA32_MCG_CTL to all 1s */
	if (mcg_cap & MCG_CTL_P)
		vcpu->arch.mcg_ctl = ~(u64)0;
	/* Init IA32_MCi_CTL to all 1s */
	for (bank = 0; bank < bank_num; bank++)
		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
3392 3393 3394

	if (kvm_x86_ops->setup_mce)
		kvm_x86_ops->setup_mce(vcpu);
H
Huang Ying 已提交
3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423
out:
	return r;
}

static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
				      struct kvm_x86_mce *mce)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
	u64 *banks = vcpu->arch.mce_banks;

	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
		return -EINVAL;
	/*
	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
	 * reporting is disabled
	 */
	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
	    vcpu->arch.mcg_ctl != ~(u64)0)
		return 0;
	banks += 4 * mce->bank;
	/*
	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
	 * reporting is disabled for the bank
	 */
	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
		return 0;
	if (mce->status & MCI_STATUS_UC) {
		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
3424
		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
3425
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
H
Huang Ying 已提交
3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446
			return 0;
		}
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		vcpu->arch.mcg_status = mce->mcg_status;
		banks[1] = mce->status;
		kvm_queue_exception(vcpu, MC_VECTOR);
	} else if (!(banks[1] & MCI_STATUS_VAL)
		   || !(banks[1] & MCI_STATUS_UC)) {
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		banks[1] = mce->status;
	} else
		banks[1] |= MCI_STATUS_OVER;
	return 0;
}

J
Jan Kiszka 已提交
3447 3448 3449
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
					       struct kvm_vcpu_events *events)
{
A
Avi Kivity 已提交
3450
	process_nmi(vcpu);
3451

3452
	/*
3453 3454 3455 3456
	 * The API doesn't provide the instruction length for software
	 * exceptions, so don't report them. As long as the guest RIP
	 * isn't advanced, we should expect to encounter the exception
	 * again.
3457
	 */
3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472
	if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
		events->exception.injected = 0;
		events->exception.pending = 0;
	} else {
		events->exception.injected = vcpu->arch.exception.injected;
		events->exception.pending = vcpu->arch.exception.pending;
		/*
		 * For ABI compatibility, deliberately conflate
		 * pending and injected exceptions when
		 * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
		 */
		if (!vcpu->kvm->arch.exception_payload_enabled)
			events->exception.injected |=
				vcpu->arch.exception.pending;
	}
J
Jan Kiszka 已提交
3473 3474 3475
	events->exception.nr = vcpu->arch.exception.nr;
	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
	events->exception.error_code = vcpu->arch.exception.error_code;
3476 3477
	events->exception_has_payload = vcpu->arch.exception.has_payload;
	events->exception_payload = vcpu->arch.exception.payload;
J
Jan Kiszka 已提交
3478

3479
	events->interrupt.injected =
3480
		vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
J
Jan Kiszka 已提交
3481
	events->interrupt.nr = vcpu->arch.interrupt.nr;
3482
	events->interrupt.soft = 0;
3483
	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
J
Jan Kiszka 已提交
3484 3485

	events->nmi.injected = vcpu->arch.nmi_injected;
A
Avi Kivity 已提交
3486
	events->nmi.pending = vcpu->arch.nmi_pending != 0;
J
Jan Kiszka 已提交
3487
	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
3488
	events->nmi.pad = 0;
J
Jan Kiszka 已提交
3489

3490
	events->sipi_vector = 0; /* never valid when reporting to user space */
J
Jan Kiszka 已提交
3491

3492 3493 3494 3495 3496 3497
	events->smi.smm = is_smm(vcpu);
	events->smi.pending = vcpu->arch.smi_pending;
	events->smi.smm_inside_nmi =
		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
	events->smi.latched_init = kvm_lapic_latched_init(vcpu);

3498
	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
3499 3500
			 | KVM_VCPUEVENT_VALID_SHADOW
			 | KVM_VCPUEVENT_VALID_SMM);
3501 3502 3503
	if (vcpu->kvm->arch.exception_payload_enabled)
		events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;

3504
	memset(&events->reserved, 0, sizeof(events->reserved));
J
Jan Kiszka 已提交
3505 3506
}

3507 3508
static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);

J
Jan Kiszka 已提交
3509 3510 3511
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
					      struct kvm_vcpu_events *events)
{
3512
	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
3513
			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
3514
			      | KVM_VCPUEVENT_VALID_SHADOW
3515 3516
			      | KVM_VCPUEVENT_VALID_SMM
			      | KVM_VCPUEVENT_VALID_PAYLOAD))
J
Jan Kiszka 已提交
3517 3518
		return -EINVAL;

3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532
	if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
		if (!vcpu->kvm->arch.exception_payload_enabled)
			return -EINVAL;
		if (events->exception.pending)
			events->exception.injected = 0;
		else
			events->exception_has_payload = 0;
	} else {
		events->exception.pending = 0;
		events->exception_has_payload = 0;
	}

	if ((events->exception.injected || events->exception.pending) &&
	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
3533 3534
		return -EINVAL;

3535 3536 3537 3538 3539 3540
	/* INITs are latched while in SMM */
	if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
	    (events->smi.smm || events->smi.pending) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
		return -EINVAL;

A
Avi Kivity 已提交
3541
	process_nmi(vcpu);
3542 3543
	vcpu->arch.exception.injected = events->exception.injected;
	vcpu->arch.exception.pending = events->exception.pending;
J
Jan Kiszka 已提交
3544 3545 3546
	vcpu->arch.exception.nr = events->exception.nr;
	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
	vcpu->arch.exception.error_code = events->exception.error_code;
3547 3548
	vcpu->arch.exception.has_payload = events->exception_has_payload;
	vcpu->arch.exception.payload = events->exception_payload;
J
Jan Kiszka 已提交
3549

3550
	vcpu->arch.interrupt.injected = events->interrupt.injected;
J
Jan Kiszka 已提交
3551 3552
	vcpu->arch.interrupt.nr = events->interrupt.nr;
	vcpu->arch.interrupt.soft = events->interrupt.soft;
3553 3554 3555
	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
		kvm_x86_ops->set_interrupt_shadow(vcpu,
						  events->interrupt.shadow);
J
Jan Kiszka 已提交
3556 3557

	vcpu->arch.nmi_injected = events->nmi.injected;
3558 3559
	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
		vcpu->arch.nmi_pending = events->nmi.pending;
J
Jan Kiszka 已提交
3560 3561
	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);

3562
	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
3563
	    lapic_in_kernel(vcpu))
3564
		vcpu->arch.apic->sipi_vector = events->sipi_vector;
J
Jan Kiszka 已提交
3565

3566
	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
3567
		u32 hflags = vcpu->arch.hflags;
3568
		if (events->smi.smm)
3569
			hflags |= HF_SMM_MASK;
3570
		else
3571 3572 3573
			hflags &= ~HF_SMM_MASK;
		kvm_set_hflags(vcpu, hflags);

3574
		vcpu->arch.smi_pending = events->smi.pending;
3575 3576 3577 3578

		if (events->smi.smm) {
			if (events->smi.smm_inside_nmi)
				vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
3579
			else
3580 3581 3582 3583 3584 3585 3586
				vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
			if (lapic_in_kernel(vcpu)) {
				if (events->smi.latched_init)
					set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
				else
					clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
			}
3587 3588 3589
		}
	}

3590 3591
	kvm_make_request(KVM_REQ_EVENT, vcpu);

J
Jan Kiszka 已提交
3592 3593 3594
	return 0;
}

3595 3596 3597
static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
					     struct kvm_debugregs *dbgregs)
{
J
Jan Kiszka 已提交
3598 3599
	unsigned long val;

3600
	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
3601
	kvm_get_dr(vcpu, 6, &val);
J
Jan Kiszka 已提交
3602
	dbgregs->dr6 = val;
3603 3604
	dbgregs->dr7 = vcpu->arch.dr7;
	dbgregs->flags = 0;
3605
	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
3606 3607 3608 3609 3610 3611 3612 3613
}

static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
					    struct kvm_debugregs *dbgregs)
{
	if (dbgregs->flags)
		return -EINVAL;

3614 3615 3616 3617 3618
	if (dbgregs->dr6 & ~0xffffffffull)
		return -EINVAL;
	if (dbgregs->dr7 & ~0xffffffffull)
		return -EINVAL;

3619
	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
3620
	kvm_update_dr0123(vcpu);
3621
	vcpu->arch.dr6 = dbgregs->dr6;
J
Jan Kiszka 已提交
3622
	kvm_update_dr6(vcpu);
3623
	vcpu->arch.dr7 = dbgregs->dr7;
3624
	kvm_update_dr7(vcpu);
3625 3626 3627 3628

	return 0;
}

3629 3630 3631 3632
#define XSTATE_COMPACTION_ENABLED (1ULL << 63)

static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
{
3633
	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
3634
	u64 xstate_bv = xsave->header.xfeatures;
3635 3636 3637 3638 3639 3640 3641 3642 3643
	u64 valid;

	/*
	 * Copy legacy XSAVE area, to avoid complications with CPUID
	 * leaves 0 and 1 in the loop below.
	 */
	memcpy(dest, xsave, XSAVE_HDR_OFFSET);

	/* Set XSTATE_BV */
3644
	xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
3645 3646 3647 3648 3649 3650
	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;

	/*
	 * Copy each region from the possibly compacted offset to the
	 * non-compacted offset.
	 */
D
Dave Hansen 已提交
3651
	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
3652 3653 3654 3655 3656 3657 3658 3659 3660
	while (valid) {
		u64 feature = valid & -valid;
		int index = fls64(feature) - 1;
		void *src = get_xsave_addr(xsave, feature);

		if (src) {
			u32 size, offset, ecx, edx;
			cpuid_count(XSTATE_CPUID, index,
				    &size, &offset, &ecx, &edx);
3661 3662 3663 3664 3665 3666
			if (feature == XFEATURE_MASK_PKRU)
				memcpy(dest + offset, &vcpu->arch.pkru,
				       sizeof(vcpu->arch.pkru));
			else
				memcpy(dest + offset, src, size);

3667 3668 3669 3670 3671 3672 3673 3674
		}

		valid -= feature;
	}
}

static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
{
3675
	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
3676 3677 3678 3679 3680 3681 3682 3683 3684 3685
	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
	u64 valid;

	/*
	 * Copy legacy XSAVE area, to avoid complications with CPUID
	 * leaves 0 and 1 in the loop below.
	 */
	memcpy(xsave, src, XSAVE_HDR_OFFSET);

	/* Set XSTATE_BV and possibly XCOMP_BV.  */
3686
	xsave->header.xfeatures = xstate_bv;
3687
	if (boot_cpu_has(X86_FEATURE_XSAVES))
3688
		xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
3689 3690 3691 3692 3693

	/*
	 * Copy each region from the non-compacted offset to the
	 * possibly compacted offset.
	 */
D
Dave Hansen 已提交
3694
	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
3695 3696 3697 3698 3699 3700 3701 3702 3703
	while (valid) {
		u64 feature = valid & -valid;
		int index = fls64(feature) - 1;
		void *dest = get_xsave_addr(xsave, feature);

		if (dest) {
			u32 size, offset, ecx, edx;
			cpuid_count(XSTATE_CPUID, index,
				    &size, &offset, &ecx, &edx);
3704 3705 3706 3707 3708
			if (feature == XFEATURE_MASK_PKRU)
				memcpy(&vcpu->arch.pkru, src + offset,
				       sizeof(vcpu->arch.pkru));
			else
				memcpy(dest, src + offset, size);
3709
		}
3710 3711 3712 3713 3714

		valid -= feature;
	}
}

3715 3716 3717
static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
					 struct kvm_xsave *guest_xsave)
{
3718
	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
3719 3720
		memset(guest_xsave, 0, sizeof(struct kvm_xsave));
		fill_xsave((u8 *) guest_xsave->region, vcpu);
3721
	} else {
3722
		memcpy(guest_xsave->region,
3723
			&vcpu->arch.guest_fpu.state.fxsave,
3724
			sizeof(struct fxregs_state));
3725
		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
D
Dave Hansen 已提交
3726
			XFEATURE_MASK_FPSSE;
3727 3728 3729
	}
}

3730 3731
#define XSAVE_MXCSR_OFFSET 24

3732 3733 3734 3735 3736
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
					struct kvm_xsave *guest_xsave)
{
	u64 xstate_bv =
		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
3737
	u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
3738

3739
	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
3740 3741 3742 3743 3744
		/*
		 * Here we allow setting states that are not present in
		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
		 * with old userspace.
		 */
3745 3746
		if (xstate_bv & ~kvm_supported_xcr0() ||
			mxcsr & ~mxcsr_feature_mask)
3747
			return -EINVAL;
3748
		load_xsave(vcpu, (u8 *)guest_xsave->region);
3749
	} else {
3750 3751
		if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
			mxcsr & ~mxcsr_feature_mask)
3752
			return -EINVAL;
3753
		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
3754
			guest_xsave->region, sizeof(struct fxregs_state));
3755 3756 3757 3758 3759 3760 3761
	}
	return 0;
}

static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
					struct kvm_xcrs *guest_xcrs)
{
3762
	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777
		guest_xcrs->nr_xcrs = 0;
		return;
	}

	guest_xcrs->nr_xcrs = 1;
	guest_xcrs->flags = 0;
	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
}

static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
				       struct kvm_xcrs *guest_xcrs)
{
	int i, r = 0;

3778
	if (!boot_cpu_has(X86_FEATURE_XSAVE))
3779 3780 3781 3782 3783 3784 3785
		return -EINVAL;

	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
		return -EINVAL;

	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
		/* Only support XCR0 currently */
P
Paolo Bonzini 已提交
3786
		if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
3787
			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
P
Paolo Bonzini 已提交
3788
				guest_xcrs->xcrs[i].value);
3789 3790 3791 3792 3793 3794 3795
			break;
		}
	if (r)
		r = -EINVAL;
	return r;
}

3796 3797 3798 3799 3800 3801 3802 3803
/*
 * kvm_set_guest_paused() indicates to the guest kernel that it has been
 * stopped by the hypervisor.  This function will be called from the host only.
 * EINVAL is returned when the host attempts to set the flag for a guest that
 * does not support pv clocks.
 */
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
3804
	if (!vcpu->arch.pv_time_enabled)
3805
		return -EINVAL;
3806
	vcpu->arch.pvclock_set_guest_stopped_request = true;
3807 3808 3809 3810
	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	return 0;
}

3811 3812 3813
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
3814 3815 3816 3817
	int r;
	uint16_t vmcs_version;
	void __user *user_ptr;

3818 3819 3820 3821
	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
3822 3823 3824
	case KVM_CAP_HYPERV_SYNIC2:
		if (cap->args[0])
			return -EINVAL;
3825
	case KVM_CAP_HYPERV_SYNIC:
3826 3827
		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;
3828 3829
		return kvm_hv_activate_synic(vcpu, cap->cap ==
					     KVM_CAP_HYPERV_SYNIC2);
3830
	case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
3831 3832
		if (!kvm_x86_ops->nested_enable_evmcs)
			return -ENOTTY;
3833 3834 3835 3836 3837 3838 3839 3840 3841
		r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version);
		if (!r) {
			user_ptr = (void __user *)(uintptr_t)cap->args[0];
			if (copy_to_user(user_ptr, &vmcs_version,
					 sizeof(vmcs_version)))
				r = -EFAULT;
		}
		return r;

3842 3843 3844 3845 3846
	default:
		return -EINVAL;
	}
}

3847 3848 3849 3850 3851 3852
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
3853 3854 3855 3856 3857 3858 3859
	union {
		struct kvm_lapic_state *lapic;
		struct kvm_xsave *xsave;
		struct kvm_xcrs *xcrs;
		void *buffer;
	} u;

3860 3861
	vcpu_load(vcpu);

3862
	u.buffer = NULL;
3863 3864
	switch (ioctl) {
	case KVM_GET_LAPIC: {
3865
		r = -EINVAL;
3866
		if (!lapic_in_kernel(vcpu))
3867
			goto out;
3868
		u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3869

3870
		r = -ENOMEM;
3871
		if (!u.lapic)
3872
			goto out;
3873
		r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
3874 3875 3876
		if (r)
			goto out;
		r = -EFAULT;
3877
		if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
3878 3879 3880 3881 3882
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
3883
		r = -EINVAL;
3884
		if (!lapic_in_kernel(vcpu))
3885
			goto out;
3886
		u.lapic = memdup_user(argp, sizeof(*u.lapic));
3887 3888 3889 3890
		if (IS_ERR(u.lapic)) {
			r = PTR_ERR(u.lapic);
			goto out_nofree;
		}
3891

3892
		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
3893 3894
		break;
	}
3895 3896 3897 3898
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
3899
		if (copy_from_user(&irq, argp, sizeof(irq)))
3900 3901 3902 3903
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
3904 3905 3906 3907
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		break;
	}
3908 3909 3910 3911
	case KVM_SMI: {
		r = kvm_vcpu_ioctl_smi(vcpu);
		break;
	}
3912 3913 3914 3915 3916
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
3917
		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
3918 3919 3920 3921
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		break;
	}
3922 3923 3924 3925 3926
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
3927
		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
3928 3929
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
3930
					      cpuid_arg->entries);
3931 3932 3933 3934 3935 3936 3937
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
3938
		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
3939 3940
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
3941
					      cpuid_arg->entries);
3942 3943 3944
		if (r)
			goto out;
		r = -EFAULT;
3945
		if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
3946 3947 3948 3949
			goto out;
		r = 0;
		break;
	}
3950 3951
	case KVM_GET_MSRS: {
		int idx = srcu_read_lock(&vcpu->kvm->srcu);
3952
		r = msr_io(vcpu, argp, do_get_msr, 1);
3953
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
3954
		break;
3955 3956 3957
	}
	case KVM_SET_MSRS: {
		int idx = srcu_read_lock(&vcpu->kvm->srcu);
3958
		r = msr_io(vcpu, argp, do_set_msr, 0);
3959
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
3960
		break;
3961
	}
3962 3963 3964 3965
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
3966
		if (copy_from_user(&tac, argp, sizeof(tac)))
3967 3968 3969 3970 3971
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
3972
		if (copy_to_user(argp, &tac, sizeof(tac)))
3973 3974 3975 3976
			goto out;
		r = 0;
		break;
	};
A
Avi Kivity 已提交
3977 3978
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;
3979
		int idx;
A
Avi Kivity 已提交
3980 3981

		r = -EINVAL;
3982
		if (!lapic_in_kernel(vcpu))
A
Avi Kivity 已提交
3983 3984
			goto out;
		r = -EFAULT;
3985
		if (copy_from_user(&va, argp, sizeof(va)))
A
Avi Kivity 已提交
3986
			goto out;
3987
		idx = srcu_read_lock(&vcpu->kvm->srcu);
3988
		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3989
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
A
Avi Kivity 已提交
3990 3991
		break;
	}
H
Huang Ying 已提交
3992 3993 3994 3995
	case KVM_X86_SETUP_MCE: {
		u64 mcg_cap;

		r = -EFAULT;
3996
		if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
H
Huang Ying 已提交
3997 3998 3999 4000 4001 4002 4003 4004
			goto out;
		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
		break;
	}
	case KVM_X86_SET_MCE: {
		struct kvm_x86_mce mce;

		r = -EFAULT;
4005
		if (copy_from_user(&mce, argp, sizeof(mce)))
H
Huang Ying 已提交
4006 4007 4008 4009
			goto out;
		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
		break;
	}
J
Jan Kiszka 已提交
4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);

		r = -EFAULT;
		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		r = -EFAULT;
		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
			break;

		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
		break;
	}
4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053
	case KVM_GET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);

		r = -EFAULT;
		if (copy_to_user(argp, &dbgregs,
				 sizeof(struct kvm_debugregs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		r = -EFAULT;
		if (copy_from_user(&dbgregs, argp,
				   sizeof(struct kvm_debugregs)))
			break;

		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
		break;
	}
4054
	case KVM_GET_XSAVE: {
4055
		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
4056
		r = -ENOMEM;
4057
		if (!u.xsave)
4058 4059
			break;

4060
		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
4061 4062

		r = -EFAULT;
4063
		if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
4064 4065 4066 4067 4068
			break;
		r = 0;
		break;
	}
	case KVM_SET_XSAVE: {
4069
		u.xsave = memdup_user(argp, sizeof(*u.xsave));
4070 4071 4072 4073
		if (IS_ERR(u.xsave)) {
			r = PTR_ERR(u.xsave);
			goto out_nofree;
		}
4074

4075
		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
4076 4077 4078
		break;
	}
	case KVM_GET_XCRS: {
4079
		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
4080
		r = -ENOMEM;
4081
		if (!u.xcrs)
4082 4083
			break;

4084
		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
4085 4086

		r = -EFAULT;
4087
		if (copy_to_user(argp, u.xcrs,
4088 4089 4090 4091 4092 4093
				 sizeof(struct kvm_xcrs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_XCRS: {
4094
		u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
4095 4096 4097 4098
		if (IS_ERR(u.xcrs)) {
			r = PTR_ERR(u.xcrs);
			goto out_nofree;
		}
4099

4100
		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
4101 4102
		break;
	}
4103 4104 4105 4106 4107 4108 4109 4110 4111
	case KVM_SET_TSC_KHZ: {
		u32 user_tsc_khz;

		r = -EINVAL;
		user_tsc_khz = (u32)arg;

		if (user_tsc_khz >= kvm_max_guest_tsc_khz)
			goto out;

4112 4113 4114
		if (user_tsc_khz == 0)
			user_tsc_khz = tsc_khz;

4115 4116
		if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
			r = 0;
4117 4118 4119 4120

		goto out;
	}
	case KVM_GET_TSC_KHZ: {
4121
		r = vcpu->arch.virtual_tsc_khz;
4122 4123
		goto out;
	}
4124 4125 4126 4127
	case KVM_KVMCLOCK_CTRL: {
		r = kvm_set_guest_paused(vcpu);
		goto out;
	}
4128 4129 4130 4131 4132 4133 4134 4135 4136
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
4137 4138 4139 4140 4141 4142 4143 4144 4145
	case KVM_GET_NESTED_STATE: {
		struct kvm_nested_state __user *user_kvm_nested_state = argp;
		u32 user_data_size;

		r = -EINVAL;
		if (!kvm_x86_ops->get_nested_state)
			break;

		BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
4146
		r = -EFAULT;
4147
		if (get_user(user_data_size, &user_kvm_nested_state->size))
4148
			break;
4149 4150 4151 4152

		r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
						  user_data_size);
		if (r < 0)
4153
			break;
4154 4155 4156

		if (r > user_data_size) {
			if (put_user(r, &user_kvm_nested_state->size))
4157 4158 4159 4160
				r = -EFAULT;
			else
				r = -E2BIG;
			break;
4161
		}
4162

4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173
		r = 0;
		break;
	}
	case KVM_SET_NESTED_STATE: {
		struct kvm_nested_state __user *user_kvm_nested_state = argp;
		struct kvm_nested_state kvm_state;

		r = -EINVAL;
		if (!kvm_x86_ops->set_nested_state)
			break;

4174
		r = -EFAULT;
4175
		if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
4176
			break;
4177

4178
		r = -EINVAL;
4179
		if (kvm_state.size < sizeof(kvm_state))
4180
			break;
4181 4182

		if (kvm_state.flags &
4183 4184
		    ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
		      | KVM_STATE_NESTED_EVMCS))
4185
			break;
4186 4187

		/* nested_run_pending implies guest_mode.  */
4188 4189
		if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
		    && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
4190
			break;
4191 4192 4193 4194

		r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
		break;
	}
4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213
	case KVM_GET_SUPPORTED_HV_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
			goto out;

		r = kvm_vcpu_ioctl_get_hv_cpuid(vcpu, &cpuid,
						cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
			goto out;
		r = 0;
		break;
	}
4214 4215 4216 4217
	default:
		r = -EINVAL;
	}
out:
4218
	kfree(u.buffer);
4219 4220
out_nofree:
	vcpu_put(vcpu);
4221 4222 4223
	return r;
}

4224
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
4225 4226 4227 4228
{
	return VM_FAULT_SIGBUS;
}

4229 4230 4231 4232 4233
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
4234
		return -EINVAL;
4235 4236 4237 4238
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

4239 4240 4241
static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
					      u64 ident_addr)
{
4242
	return kvm_x86_ops->set_identity_map_addr(kvm, ident_addr);
4243 4244
}

4245 4246 4247 4248 4249 4250
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					  u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

4251
	mutex_lock(&kvm->slots_lock);
4252 4253

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
4254
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
4255

4256
	mutex_unlock(&kvm->slots_lock);
4257 4258 4259 4260 4261
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
4262
	return kvm->arch.n_max_mmu_pages;
4263 4264 4265 4266
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
4267
	struct kvm_pic *pic = kvm->arch.vpic;
4268 4269 4270 4271 4272
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
4273
		memcpy(&chip->chip.pic, &pic->pics[0],
4274 4275 4276
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
4277
		memcpy(&chip->chip.pic, &pic->pics[1],
4278 4279 4280
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
4281
		kvm_get_ioapic(kvm, &chip->chip.ioapic);
4282 4283 4284 4285 4286 4287 4288 4289 4290 4291
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
4292
	struct kvm_pic *pic = kvm->arch.vpic;
4293 4294 4295 4296 4297
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
4298 4299
		spin_lock(&pic->lock);
		memcpy(&pic->pics[0], &chip->chip.pic,
4300
			sizeof(struct kvm_pic_state));
4301
		spin_unlock(&pic->lock);
4302 4303
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
4304 4305
		spin_lock(&pic->lock);
		memcpy(&pic->pics[1], &chip->chip.pic,
4306
			sizeof(struct kvm_pic_state));
4307
		spin_unlock(&pic->lock);
4308 4309
		break;
	case KVM_IRQCHIP_IOAPIC:
4310
		kvm_set_ioapic(kvm, &chip->chip.ioapic);
4311 4312 4313 4314 4315
		break;
	default:
		r = -EINVAL;
		break;
	}
4316
	kvm_pic_update_irq(pic);
4317 4318 4319
	return r;
}

4320 4321
static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
4322 4323 4324 4325 4326 4327 4328
	struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;

	BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));

	mutex_lock(&kps->lock);
	memcpy(ps, &kps->channels, sizeof(*ps));
	mutex_unlock(&kps->lock);
4329
	return 0;
4330 4331 4332 4333
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
4334
	int i;
4335 4336 4337
	struct kvm_pit *pit = kvm->arch.vpit;

	mutex_lock(&pit->pit_state.lock);
4338
	memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
4339
	for (i = 0; i < 3; i++)
4340 4341
		kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
	mutex_unlock(&pit->pit_state.lock);
4342
	return 0;
B
Beth Kon 已提交
4343 4344 4345 4346 4347 4348 4349 4350 4351
}

static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
		sizeof(ps->channels));
	ps->flags = kvm->arch.vpit->pit_state.flags;
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
4352
	memset(&ps->reserved, 0, sizeof(ps->reserved));
4353
	return 0;
B
Beth Kon 已提交
4354 4355 4356 4357
}

static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
4358
	int start = 0;
4359
	int i;
B
Beth Kon 已提交
4360
	u32 prev_legacy, cur_legacy;
4361 4362 4363 4364
	struct kvm_pit *pit = kvm->arch.vpit;

	mutex_lock(&pit->pit_state.lock);
	prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
B
Beth Kon 已提交
4365 4366 4367
	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
	if (!prev_legacy && cur_legacy)
		start = 1;
4368 4369 4370
	memcpy(&pit->pit_state.channels, &ps->channels,
	       sizeof(pit->pit_state.channels));
	pit->pit_state.flags = ps->flags;
4371
	for (i = 0; i < 3; i++)
4372
		kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
4373
				   start && i == 0);
4374
	mutex_unlock(&pit->pit_state.lock);
4375
	return 0;
4376 4377
}

4378 4379 4380
static int kvm_vm_ioctl_reinject(struct kvm *kvm,
				 struct kvm_reinject_control *control)
{
4381 4382 4383
	struct kvm_pit *pit = kvm->arch.vpit;

	if (!pit)
4384
		return -ENXIO;
4385

4386 4387 4388 4389 4390 4391 4392
	/* pit->pit_state.lock was overloaded to prevent userspace from getting
	 * an inconsistent state after running multiple KVM_REINJECT_CONTROL
	 * ioctls in parallel.  Use a separate lock if that ioctl isn't rare.
	 */
	mutex_lock(&pit->pit_state.lock);
	kvm_pit_set_reinject(pit, control->pit_reinject);
	mutex_unlock(&pit->pit_state.lock);
4393

4394 4395 4396
	return 0;
}

4397
/**
4398 4399 4400
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
4401
 *
4402 4403 4404 4405 4406 4407 4408 4409
 * Steps 1-4 below provide general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed  and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
 * writes will be marked dirty for next log read.
4410
 *
4411 4412
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
4413 4414
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
4415
 */
4416
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
4417
{
4418
	bool flush = false;
4419
	int r;
4420

4421
	mutex_lock(&kvm->slots_lock);
4422

4423 4424 4425 4426 4427 4428
	/*
	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
	 */
	if (kvm_x86_ops->flush_log_dirty)
		kvm_x86_ops->flush_log_dirty(kvm);

4429
	r = kvm_get_dirty_log_protect(kvm, log, &flush);
4430 4431 4432 4433 4434

	/*
	 * All the TLBs can be flushed out of mmu lock, see the comments in
	 * kvm_mmu_slot_remove_write_access().
	 */
4435
	lockdep_assert_held(&kvm->slots_lock);
4436
	if (flush)
4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463
		kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
{
	bool flush = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	/*
	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
	 */
	if (kvm_x86_ops->flush_log_dirty)
		kvm_x86_ops->flush_log_dirty(kvm);

	r = kvm_clear_dirty_log_protect(kvm, log, &flush);

	/*
	 * All the TLBs can be flushed out of mmu lock, see the comments in
	 * kvm_mmu_slot_remove_write_access().
	 */
	lockdep_assert_held(&kvm->slots_lock);
	if (flush)
4464 4465
		kvm_flush_remote_tlbs(kvm);

4466
	mutex_unlock(&kvm->slots_lock);
4467 4468 4469
	return r;
}

4470 4471
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			bool line_status)
4472 4473 4474 4475 4476
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
4477 4478
					irq_event->irq, irq_event->level,
					line_status);
4479 4480 4481
	return 0;
}

4482 4483
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_DISABLE_QUIRKS:
		kvm->arch.disabled_quirks = cap->args[0];
		r = 0;
		break;
4495 4496
	case KVM_CAP_SPLIT_IRQCHIP: {
		mutex_lock(&kvm->lock);
4497 4498 4499
		r = -EINVAL;
		if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
			goto split_irqchip_unlock;
4500 4501 4502
		r = -EEXIST;
		if (irqchip_in_kernel(kvm))
			goto split_irqchip_unlock;
P
Paolo Bonzini 已提交
4503
		if (kvm->created_vcpus)
4504 4505
			goto split_irqchip_unlock;
		r = kvm_setup_empty_irq_routing(kvm);
4506
		if (r)
4507 4508 4509
			goto split_irqchip_unlock;
		/* Pairs with irqchip_in_kernel. */
		smp_wmb();
4510
		kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
4511
		kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
4512 4513 4514 4515 4516
		r = 0;
split_irqchip_unlock:
		mutex_unlock(&kvm->lock);
		break;
	}
4517 4518 4519 4520 4521 4522 4523
	case KVM_CAP_X2APIC_API:
		r = -EINVAL;
		if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
			break;

		if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
			kvm->arch.x2apic_format = true;
4524 4525
		if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
			kvm->arch.x2apic_broadcast_quirk_disabled = true;
4526 4527 4528

		r = 0;
		break;
4529 4530 4531 4532 4533 4534 4535 4536
	case KVM_CAP_X86_DISABLE_EXITS:
		r = -EINVAL;
		if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
			break;

		if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
			kvm_can_mwait_in_guest())
			kvm->arch.mwait_in_guest = true;
M
Michael S. Tsirkin 已提交
4537
		if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
4538
			kvm->arch.hlt_in_guest = true;
4539 4540
		if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
			kvm->arch.pause_in_guest = true;
4541 4542
		r = 0;
		break;
4543 4544 4545
	case KVM_CAP_MSR_PLATFORM_INFO:
		kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
		r = 0;
4546 4547 4548 4549
		break;
	case KVM_CAP_EXCEPTION_PAYLOAD:
		kvm->arch.exception_payload_enabled = cap->args[0];
		r = 0;
4550
		break;
4551 4552 4553 4554 4555 4556 4557
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

4558 4559 4560 4561 4562
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
4563
	int r = -ENOTTY;
4564 4565 4566 4567 4568 4569 4570
	/*
	 * This union makes it completely explicit to gcc-3.x
	 * that these two variables' stack usage should be
	 * combined, not added together.
	 */
	union {
		struct kvm_pit_state ps;
B
Beth Kon 已提交
4571
		struct kvm_pit_state2 ps2;
4572
		struct kvm_pit_config pit_config;
4573
	} u;
4574 4575 4576 4577 4578

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		break;
4579 4580 4581
	case KVM_SET_IDENTITY_MAP_ADDR: {
		u64 ident_addr;

4582 4583 4584 4585
		mutex_lock(&kvm->lock);
		r = -EINVAL;
		if (kvm->created_vcpus)
			goto set_identity_unlock;
4586
		r = -EFAULT;
4587
		if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
4588
			goto set_identity_unlock;
4589
		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
4590 4591
set_identity_unlock:
		mutex_unlock(&kvm->lock);
4592 4593
		break;
	}
4594 4595 4596 4597 4598 4599
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
4600 4601
	case KVM_CREATE_IRQCHIP: {
		mutex_lock(&kvm->lock);
4602

4603
		r = -EEXIST;
4604
		if (irqchip_in_kernel(kvm))
4605
			goto create_irqchip_unlock;
4606

4607
		r = -EINVAL;
P
Paolo Bonzini 已提交
4608
		if (kvm->created_vcpus)
4609
			goto create_irqchip_unlock;
4610 4611 4612

		r = kvm_pic_init(kvm);
		if (r)
4613
			goto create_irqchip_unlock;
4614 4615 4616 4617

		r = kvm_ioapic_init(kvm);
		if (r) {
			kvm_pic_destroy(kvm);
4618
			goto create_irqchip_unlock;
4619 4620
		}

4621 4622
		r = kvm_setup_default_irq_routing(kvm);
		if (r) {
4623
			kvm_ioapic_destroy(kvm);
4624
			kvm_pic_destroy(kvm);
4625
			goto create_irqchip_unlock;
4626
		}
4627
		/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
4628
		smp_wmb();
4629
		kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
4630 4631
	create_irqchip_unlock:
		mutex_unlock(&kvm->lock);
4632
		break;
4633
	}
S
Sheng Yang 已提交
4634
	case KVM_CREATE_PIT:
4635 4636 4637 4638 4639 4640 4641 4642
		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
		goto create_pit;
	case KVM_CREATE_PIT2:
		r = -EFAULT;
		if (copy_from_user(&u.pit_config, argp,
				   sizeof(struct kvm_pit_config)))
			goto out;
	create_pit:
4643
		mutex_lock(&kvm->lock);
A
Avi Kivity 已提交
4644 4645 4646
		r = -EEXIST;
		if (kvm->arch.vpit)
			goto create_pit_unlock;
S
Sheng Yang 已提交
4647
		r = -ENOMEM;
4648
		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
S
Sheng Yang 已提交
4649 4650
		if (kvm->arch.vpit)
			r = 0;
A
Avi Kivity 已提交
4651
	create_pit_unlock:
4652
		mutex_unlock(&kvm->lock);
S
Sheng Yang 已提交
4653
		break;
4654 4655
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
4656
		struct kvm_irqchip *chip;
4657

4658 4659 4660
		chip = memdup_user(argp, sizeof(*chip));
		if (IS_ERR(chip)) {
			r = PTR_ERR(chip);
4661
			goto out;
4662 4663
		}

4664
		r = -ENXIO;
4665
		if (!irqchip_kernel(kvm))
4666 4667
			goto get_irqchip_out;
		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
4668
		if (r)
4669
			goto get_irqchip_out;
4670
		r = -EFAULT;
4671
		if (copy_to_user(argp, chip, sizeof(*chip)))
4672
			goto get_irqchip_out;
4673
		r = 0;
4674 4675
	get_irqchip_out:
		kfree(chip);
4676 4677 4678 4679
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
4680
		struct kvm_irqchip *chip;
4681

4682 4683 4684
		chip = memdup_user(argp, sizeof(*chip));
		if (IS_ERR(chip)) {
			r = PTR_ERR(chip);
4685
			goto out;
4686 4687
		}

4688
		r = -ENXIO;
4689
		if (!irqchip_kernel(kvm))
4690 4691
			goto set_irqchip_out;
		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
4692
		if (r)
4693
			goto set_irqchip_out;
4694
		r = 0;
4695 4696
	set_irqchip_out:
		kfree(chip);
4697 4698
		break;
	}
4699 4700
	case KVM_GET_PIT: {
		r = -EFAULT;
4701
		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
4702 4703 4704 4705
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
4706
		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
4707 4708 4709
		if (r)
			goto out;
		r = -EFAULT;
4710
		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
4711 4712 4713 4714 4715 4716
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		r = -EFAULT;
4717
		if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
4718 4719 4720 4721
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
4722
		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
4723 4724
		break;
	}
B
Beth Kon 已提交
4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747
	case KVM_GET_PIT2: {
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT2: {
		r = -EFAULT;
		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
		break;
	}
4748 4749 4750 4751 4752 4753 4754 4755
	case KVM_REINJECT_CONTROL: {
		struct kvm_reinject_control control;
		r =  -EFAULT;
		if (copy_from_user(&control, argp, sizeof(control)))
			goto out;
		r = kvm_vm_ioctl_reinject(kvm, &control);
		break;
	}
4756 4757 4758
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
P
Paolo Bonzini 已提交
4759
		if (kvm->created_vcpus)
4760 4761 4762 4763 4764
			r = -EBUSY;
		else
			kvm->arch.bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
E
Ed Swierk 已提交
4765
	case KVM_XEN_HVM_CONFIG: {
4766
		struct kvm_xen_hvm_config xhc;
E
Ed Swierk 已提交
4767
		r = -EFAULT;
4768
		if (copy_from_user(&xhc, argp, sizeof(xhc)))
E
Ed Swierk 已提交
4769 4770
			goto out;
		r = -EINVAL;
4771
		if (xhc.flags)
E
Ed Swierk 已提交
4772
			goto out;
4773
		memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
E
Ed Swierk 已提交
4774 4775 4776
		r = 0;
		break;
	}
4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789
	case KVM_SET_CLOCK: {
		struct kvm_clock_data user_ns;
		u64 now_ns;

		r = -EFAULT;
		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
			goto out;

		r = -EINVAL;
		if (user_ns.flags)
			goto out;

		r = 0;
4790 4791 4792 4793 4794 4795
		/*
		 * TODO: userspace has to take care of races with VCPU_RUN, so
		 * kvm_gen_update_masterclock() can be cut down to locked
		 * pvclock_update_vm_gtod_copy().
		 */
		kvm_gen_update_masterclock(kvm);
4796
		now_ns = get_kvmclock_ns(kvm);
4797
		kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
4798
		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
4799 4800 4801 4802 4803 4804
		break;
	}
	case KVM_GET_CLOCK: {
		struct kvm_clock_data user_ns;
		u64 now_ns;

4805
		now_ns = get_kvmclock_ns(kvm);
4806
		user_ns.clock = now_ns;
4807
		user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
4808
		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
4809 4810 4811 4812 4813 4814 4815

		r = -EFAULT;
		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
			goto out;
		r = 0;
		break;
	}
4816 4817 4818 4819 4820 4821
	case KVM_MEMORY_ENCRYPT_OP: {
		r = -ENOTTY;
		if (kvm_x86_ops->mem_enc_op)
			r = kvm_x86_ops->mem_enc_op(kvm, argp);
		break;
	}
4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845
	case KVM_MEMORY_ENCRYPT_REG_REGION: {
		struct kvm_enc_region region;

		r = -EFAULT;
		if (copy_from_user(&region, argp, sizeof(region)))
			goto out;

		r = -ENOTTY;
		if (kvm_x86_ops->mem_enc_reg_region)
			r = kvm_x86_ops->mem_enc_reg_region(kvm, &region);
		break;
	}
	case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
		struct kvm_enc_region region;

		r = -EFAULT;
		if (copy_from_user(&region, argp, sizeof(region)))
			goto out;

		r = -ENOTTY;
		if (kvm_x86_ops->mem_enc_unreg_region)
			r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region);
		break;
	}
4846 4847 4848 4849 4850 4851 4852 4853 4854
	case KVM_HYPERV_EVENTFD: {
		struct kvm_hyperv_eventfd hvevfd;

		r = -EFAULT;
		if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
			goto out;
		r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
		break;
	}
4855
	default:
4856
		r = -ENOTTY;
4857 4858 4859 4860 4861
	}
out:
	return r;
}

4862
static void kvm_init_msr_list(void)
4863 4864 4865 4866
{
	u32 dummy[2];
	unsigned i, j;

4867
	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
4868 4869
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
4870 4871 4872

		/*
		 * Even MSRs that are valid in the host may not be exposed
4873
		 * to the guests in some cases.
4874 4875 4876
		 */
		switch (msrs_to_save[i]) {
		case MSR_IA32_BNDCFGS:
4877
			if (!kvm_mpx_supported())
4878 4879
				continue;
			break;
4880 4881 4882 4883
		case MSR_TSC_AUX:
			if (!kvm_x86_ops->rdtscp_supported())
				continue;
			break;
4884 4885 4886 4887
		default:
			break;
		}

4888 4889 4890 4891 4892
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
4893 4894

	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
4895 4896
		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
			continue;
4897 4898 4899 4900 4901 4902

		if (j < i)
			emulated_msrs[j] = emulated_msrs[i];
		j++;
	}
	num_emulated_msrs = j;
4903 4904 4905 4906 4907

	for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
		struct kvm_msr_entry msr;

		msr.index = msr_based_features[i];
4908
		if (kvm_get_msr_feature(&msr))
4909 4910 4911 4912 4913 4914 4915
			continue;

		if (j < i)
			msr_based_features[j] = msr_based_features[i];
		j++;
	}
	num_msr_based_features = j;
4916 4917
}

4918 4919
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
			   const void *v)
4920
{
4921 4922 4923 4924 4925
	int handled = 0;
	int n;

	do {
		n = min(len, 8);
4926
		if (!(lapic_in_kernel(vcpu) &&
4927 4928
		      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
		    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
4929 4930 4931 4932 4933 4934
			break;
		handled += n;
		addr += n;
		len -= n;
		v += n;
	} while (len);
4935

4936
	return handled;
4937 4938
}

4939
static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
4940
{
4941 4942 4943 4944 4945
	int handled = 0;
	int n;

	do {
		n = min(len, 8);
4946
		if (!(lapic_in_kernel(vcpu) &&
4947 4948 4949
		      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
					 addr, n, v))
		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
4950
			break;
4951
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
4952 4953 4954 4955 4956
		handled += n;
		addr += n;
		len -= n;
		v += n;
	} while (len);
4957

4958
	return handled;
4959 4960
}

4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972
static void kvm_set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

4973 4974
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception)
4975 4976 4977 4978 4979 4980 4981
{
	gpa_t t_gpa;

	BUG_ON(!mmu_is_nested(vcpu));

	/* NPT walks are always user-walks */
	access |= PFERR_USER_MASK;
4982
	t_gpa  = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
4983 4984 4985 4986

	return t_gpa;
}

4987 4988
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception)
4989 4990
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4991
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4992 4993
}

4994 4995
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
4996 4997 4998
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_FETCH_MASK;
4999
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
5000 5001
}

5002 5003
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
5004 5005 5006
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_WRITE_MASK;
5007
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
5008 5009 5010
}

/* uses this to access any guest's mapped memory without checking CPL */
5011 5012
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
5013
{
5014
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
5015 5016 5017 5018
}

static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u32 access,
5019
				      struct x86_exception *exception)
5020 5021
{
	void *data = val;
5022
	int r = X86EMUL_CONTINUE;
5023 5024

	while (bytes) {
5025
		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
5026
							    exception);
5027
		unsigned offset = addr & (PAGE_SIZE-1);
5028
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
5029 5030
		int ret;

5031
		if (gpa == UNMAPPED_GVA)
5032
			return X86EMUL_PROPAGATE_FAULT;
5033 5034
		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
					       offset, toread);
5035
		if (ret < 0) {
5036
			r = X86EMUL_IO_NEEDED;
5037 5038
			goto out;
		}
5039

5040 5041 5042
		bytes -= toread;
		data += toread;
		addr += toread;
5043
	}
5044 5045
out:
	return r;
5046
}
5047

5048
/* used for instruction fetching */
5049 5050
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val, unsigned int bytes,
5051
				struct x86_exception *exception)
5052
{
5053
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5054
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
5055 5056
	unsigned offset;
	int ret;
5057

5058 5059 5060 5061 5062 5063 5064 5065 5066
	/* Inline kvm_read_guest_virt_helper for speed.  */
	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
						    exception);
	if (unlikely(gpa == UNMAPPED_GVA))
		return X86EMUL_PROPAGATE_FAULT;

	offset = addr & (PAGE_SIZE-1);
	if (WARN_ON(offset + bytes > PAGE_SIZE))
		bytes = (unsigned)PAGE_SIZE - offset;
5067 5068
	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
				       offset, bytes);
5069 5070 5071 5072
	if (unlikely(ret < 0))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
5073 5074
}

5075
int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
5076
			       gva_t addr, void *val, unsigned int bytes,
5077
			       struct x86_exception *exception)
5078 5079
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
5080

5081
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
5082
					  exception);
5083
}
5084
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
5085

5086 5087
static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
			     gva_t addr, void *val, unsigned int bytes,
5088
			     struct x86_exception *exception, bool system)
5089
{
5090
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5091 5092 5093 5094 5095 5096
	u32 access = 0;

	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
		access |= PFERR_USER_MASK;

	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
5097 5098
}

5099 5100 5101 5102 5103 5104 5105 5106 5107
static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
		unsigned long addr, void *val, unsigned int bytes)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);

	return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
}

5108 5109 5110
static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u32 access,
				      struct x86_exception *exception)
5111 5112 5113 5114 5115
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
5116
		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
5117
							     access,
5118
							     exception);
5119 5120 5121 5122
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

5123
		if (gpa == UNMAPPED_GVA)
5124
			return X86EMUL_PROPAGATE_FAULT;
5125
		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
5126
		if (ret < 0) {
5127
			r = X86EMUL_IO_NEEDED;
5128 5129 5130 5131 5132 5133 5134 5135 5136 5137
			goto out;
		}

		bytes -= towrite;
		data += towrite;
		addr += towrite;
	}
out:
	return r;
}
5138 5139

static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
5140 5141
			      unsigned int bytes, struct x86_exception *exception,
			      bool system)
5142 5143
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5144 5145 5146 5147
	u32 access = PFERR_WRITE_MASK;

	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
		access |= PFERR_USER_MASK;
5148 5149

	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
5150
					   access, exception);
5151 5152 5153 5154 5155
}

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
				unsigned int bytes, struct x86_exception *exception)
{
P
Paolo Bonzini 已提交
5156 5157 5158
	/* kvm_write_guest_virt_system can pull in tons of pages. */
	vcpu->arch.l1tf_flush_l1d = true;

5159 5160 5161
	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
					   PFERR_WRITE_MASK, exception);
}
N
Nadav Har'El 已提交
5162
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
5163

W
Wanpeng Li 已提交
5164 5165
int handle_ud(struct kvm_vcpu *vcpu)
{
5166
	int emul_type = EMULTYPE_TRAP_UD;
W
Wanpeng Li 已提交
5167
	enum emulation_result er;
5168 5169 5170 5171
	char sig[5]; /* ud2; .ascii "kvm" */
	struct x86_exception e;

	if (force_emulation_prefix &&
5172 5173
	    kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
				sig, sizeof(sig), &e) == 0 &&
5174 5175 5176 5177
	    memcmp(sig, "\xf\xbkvm", sizeof(sig)) == 0) {
		kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
		emul_type = 0;
	}
W
Wanpeng Li 已提交
5178

5179
	er = kvm_emulate_instruction(vcpu, emul_type);
W
Wanpeng Li 已提交
5180 5181 5182 5183 5184 5185 5186 5187
	if (er == EMULATE_USER_EXIT)
		return 0;
	if (er != EMULATE_DONE)
		kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}
EXPORT_SYMBOL_GPL(handle_ud);

5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202
static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			    gpa_t gpa, bool write)
{
	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		return 1;

	if (vcpu_match_mmio_gpa(vcpu, gpa)) {
		trace_vcpu_match_mmio(gva, gpa, write, true);
		return 1;
	}

	return 0;
}

5203 5204 5205 5206
static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
				gpa_t *gpa, struct x86_exception *exception,
				bool write)
{
5207 5208
	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
		| (write ? PFERR_WRITE_MASK : 0);
5209

5210 5211 5212 5213 5214
	/*
	 * currently PKRU is only applied to ept enabled guest so
	 * there is no pkey in EPT page table for L1 guest or EPT
	 * shadow page table for L2 guest.
	 */
5215
	if (vcpu_match_mmio_gva(vcpu, gva)
F
Feng Wu 已提交
5216
	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
5217
				 vcpu->arch.access, 0, access)) {
5218 5219
		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
					(gva & (PAGE_SIZE - 1));
X
Xiao Guangrong 已提交
5220
		trace_vcpu_match_mmio(gva, *gpa, write, false);
5221 5222 5223
		return 1;
	}

5224 5225 5226 5227 5228
	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);

	if (*gpa == UNMAPPED_GVA)
		return -1;

5229
	return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
5230 5231
}

5232
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
5233
			const void *val, int bytes)
5234 5235 5236
{
	int ret;

5237
	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
5238
	if (ret < 0)
5239
		return 0;
5240
	kvm_page_track_write(vcpu, gpa, val, bytes);
5241 5242 5243
	return 1;
}

5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259
struct read_write_emulator_ops {
	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
				  int bytes);
	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
				  void *val, int bytes);
	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
			       int bytes, void *val);
	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
				    void *val, int bytes);
	bool write;
};

static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
	if (vcpu->mmio_read_completed) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
5260
			       vcpu->mmio_fragments[0].gpa, val);
5261 5262 5263 5264 5265 5266 5267 5268 5269 5270
		vcpu->mmio_read_completed = 0;
		return 1;
	}

	return 0;
}

static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			void *val, int bytes)
{
5271
	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
5272 5273 5274 5275 5276 5277 5278 5279 5280 5281
}

static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			 void *val, int bytes)
{
	return emulator_write_phys(vcpu, gpa, val, bytes);
}

static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{
5282
	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
5283 5284 5285 5286 5287 5288
	return vcpu_mmio_write(vcpu, gpa, bytes, val);
}

static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			  void *val, int bytes)
{
5289
	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
5290 5291 5292 5293 5294 5295
	return X86EMUL_IO_NEEDED;
}

static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			   void *val, int bytes)
{
A
Avi Kivity 已提交
5296 5297
	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];

5298
	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
5299 5300 5301
	return X86EMUL_CONTINUE;
}

5302
static const struct read_write_emulator_ops read_emultor = {
5303 5304 5305 5306 5307 5308
	.read_write_prepare = read_prepare,
	.read_write_emulate = read_emulate,
	.read_write_mmio = vcpu_mmio_read,
	.read_write_exit_mmio = read_exit_mmio,
};

5309
static const struct read_write_emulator_ops write_emultor = {
5310 5311 5312 5313 5314 5315
	.read_write_emulate = write_emulate,
	.read_write_mmio = write_mmio,
	.read_write_exit_mmio = write_exit_mmio,
	.write = true,
};

5316 5317 5318 5319
static int emulator_read_write_onepage(unsigned long addr, void *val,
				       unsigned int bytes,
				       struct x86_exception *exception,
				       struct kvm_vcpu *vcpu,
5320
				       const struct read_write_emulator_ops *ops)
5321
{
5322 5323
	gpa_t gpa;
	int handled, ret;
5324
	bool write = ops->write;
A
Avi Kivity 已提交
5325
	struct kvm_mmio_fragment *frag;
5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;

	/*
	 * If the exit was due to a NPF we may already have a GPA.
	 * If the GPA is present, use it to avoid the GVA to GPA table walk.
	 * Note, this cannot be used on string operations since string
	 * operation using rep will only have the initial GPA from the NPF
	 * occurred.
	 */
	if (vcpu->arch.gpa_available &&
	    emulator_can_use_gpa(ctxt) &&
5337 5338 5339 5340 5341 5342 5343
	    (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) {
		gpa = vcpu->arch.gpa_val;
		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
	} else {
		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
		if (ret < 0)
			return X86EMUL_PROPAGATE_FAULT;
5344
	}
5345

5346
	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
5347 5348 5349 5350 5351
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
5352
	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
5353
	if (handled == bytes)
5354 5355
		return X86EMUL_CONTINUE;

5356 5357 5358 5359
	gpa += handled;
	bytes -= handled;
	val += handled;

5360 5361 5362 5363 5364
	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
	frag->gpa = gpa;
	frag->data = val;
	frag->len = bytes;
A
Avi Kivity 已提交
5365
	return X86EMUL_CONTINUE;
5366 5367
}

5368 5369
static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
			unsigned long addr,
5370 5371
			void *val, unsigned int bytes,
			struct x86_exception *exception,
5372
			const struct read_write_emulator_ops *ops)
5373
{
5374
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
A
Avi Kivity 已提交
5375 5376 5377 5378 5379 5380 5381 5382
	gpa_t gpa;
	int rc;

	if (ops->read_write_prepare &&
		  ops->read_write_prepare(vcpu, val, bytes))
		return X86EMUL_CONTINUE;

	vcpu->mmio_nr_fragments = 0;
5383

5384 5385
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
A
Avi Kivity 已提交
5386
		int now;
5387 5388

		now = -addr & ~PAGE_MASK;
5389 5390 5391
		rc = emulator_read_write_onepage(addr, val, now, exception,
						 vcpu, ops);

5392 5393 5394
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
5395 5396
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			addr = (u32)addr;
5397 5398 5399
		val += now;
		bytes -= now;
	}
5400

A
Avi Kivity 已提交
5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413
	rc = emulator_read_write_onepage(addr, val, bytes, exception,
					 vcpu, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (!vcpu->mmio_nr_fragments)
		return rc;

	gpa = vcpu->mmio_fragments[0].gpa;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

5414
	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
A
Avi Kivity 已提交
5415 5416 5417 5418 5419
	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;
	vcpu->run->mmio.phys_addr = gpa;

	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431
}

static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
				  unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, val, bytes,
				   exception, &read_emultor);
}

5432
static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
5433 5434 5435 5436 5437 5438 5439
			    unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, (void *)val, bytes,
				   exception, &write_emultor);
5440 5441
}

5442 5443 5444 5445 5446 5447 5448
#define CMPXCHG_TYPE(t, ptr, old, new) \
	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))

#ifdef CONFIG_X86_64
#  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
#else
#  define CMPXCHG64(ptr, old, new) \
5449
	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
5450 5451
#endif

5452 5453
static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
				     unsigned long addr,
5454 5455 5456
				     const void *old,
				     const void *new,
				     unsigned int bytes,
5457
				     struct x86_exception *exception)
5458
{
5459
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5460 5461 5462 5463
	gpa_t gpa;
	struct page *page;
	char *kaddr;
	bool exchanged;
5464

5465 5466 5467
	/* guests cmpxchg8b have to be emulated atomically */
	if (bytes > 8 || (bytes & (bytes - 1)))
		goto emul_write;
5468

5469
	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
5470

5471 5472 5473
	if (gpa == UNMAPPED_GVA ||
	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto emul_write;
5474

5475 5476
	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
		goto emul_write;
5477

5478
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
5479
	if (is_error_page(page))
5480
		goto emul_write;
5481

5482
	kaddr = kmap_atomic(page);
5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498
	kaddr += offset_in_page(gpa);
	switch (bytes) {
	case 1:
		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
		break;
	case 2:
		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
		break;
	case 4:
		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
		break;
	case 8:
		exchanged = CMPXCHG64(kaddr, old, new);
		break;
	default:
		BUG();
5499
	}
5500
	kunmap_atomic(kaddr);
5501 5502 5503 5504 5505
	kvm_release_page_dirty(page);

	if (!exchanged)
		return X86EMUL_CMPXCHG_FAILED;

5506
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
5507
	kvm_page_track_write(vcpu, gpa, new, bytes);
5508 5509

	return X86EMUL_CONTINUE;
5510

5511
emul_write:
5512
	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
5513

5514
	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
5515 5516
}

5517 5518
static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
5519
	int r = 0, i;
5520

5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532
	for (i = 0; i < vcpu->arch.pio.count; i++) {
		if (vcpu->arch.pio.in)
			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
					    vcpu->arch.pio.size, pd);
		else
			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
					     vcpu->arch.pio.port, vcpu->arch.pio.size,
					     pd);
		if (r)
			break;
		pd += vcpu->arch.pio.size;
	}
5533 5534 5535
	return r;
}

5536 5537 5538
static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
			       unsigned short port, void *val,
			       unsigned int count, bool in)
5539 5540
{
	vcpu->arch.pio.port = port;
5541
	vcpu->arch.pio.in = in;
5542
	vcpu->arch.pio.count  = count;
5543 5544 5545
	vcpu->arch.pio.size = size;

	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
5546
		vcpu->arch.pio.count = 0;
5547 5548 5549 5550
		return 1;
	}

	vcpu->run->exit_reason = KVM_EXIT_IO;
5551
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
5552 5553 5554 5555 5556 5557 5558 5559
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;

	return 0;
}

5560 5561 5562
static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
				    int size, unsigned short port, void *val,
				    unsigned int count)
5563
{
5564
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5565
	int ret;
5566

5567 5568
	if (vcpu->arch.pio.count)
		goto data_avail;
5569

5570 5571
	memset(vcpu->arch.pio_data, 0, size * count);

5572 5573 5574 5575
	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
	if (ret) {
data_avail:
		memcpy(val, vcpu->arch.pio_data, size * count);
5576
		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
5577
		vcpu->arch.pio.count = 0;
5578 5579 5580 5581 5582 5583
		return 1;
	}

	return 0;
}

5584 5585 5586 5587 5588 5589 5590
static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
				     int size, unsigned short port,
				     const void *val, unsigned int count)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	memcpy(vcpu->arch.pio_data, val, size * count);
5591
	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
5592 5593 5594
	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
}

5595 5596 5597 5598 5599
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

5600
static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
5601
{
5602
	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
5603 5604
}

5605
static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
5606 5607 5608 5609 5610
{
	if (!need_emulate_wbinvd(vcpu))
		return X86EMUL_CONTINUE;

	if (kvm_x86_ops->has_wbinvd_exit()) {
5611 5612 5613
		int cpu = get_cpu();

		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
5614 5615
		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
				wbinvd_ipi, NULL, 1);
5616
		put_cpu();
5617
		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
5618 5619
	} else
		wbinvd();
5620 5621
	return X86EMUL_CONTINUE;
}
5622 5623 5624

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
5625 5626
	kvm_emulate_wbinvd_noskip(vcpu);
	return kvm_skip_emulated_instruction(vcpu);
5627
}
5628 5629
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);

5630 5631


5632 5633
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
5634
	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
5635 5636
}

5637 5638
static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long *dest)
5639
{
5640
	return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
5641 5642
}

5643 5644
static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long value)
5645
{
5646

5647
	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
5648 5649
}

5650
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
5651
{
5652
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
5653 5654
}

5655
static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
5656
{
5657
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5658 5659 5660 5661 5662 5663 5664 5665 5666 5667
	unsigned long value;

	switch (cr) {
	case 0:
		value = kvm_read_cr0(vcpu);
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
5668
		value = kvm_read_cr3(vcpu);
5669 5670 5671 5672 5673 5674 5675 5676
		break;
	case 4:
		value = kvm_read_cr4(vcpu);
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
5677
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
5678 5679 5680 5681 5682 5683
		return 0;
	}

	return value;
}

5684
static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
5685
{
5686
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5687 5688
	int res = 0;

5689 5690
	switch (cr) {
	case 0:
5691
		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
5692 5693 5694 5695 5696
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
5697
		res = kvm_set_cr3(vcpu, val);
5698 5699
		break;
	case 4:
5700
		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
5701 5702
		break;
	case 8:
A
Andre Przywara 已提交
5703
		res = kvm_set_cr8(vcpu, val);
5704 5705
		break;
	default:
5706
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
5707
		res = -1;
5708
	}
5709 5710

	return res;
5711 5712
}

5713
static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
5714
{
5715
	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
5716 5717
}

5718
static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
5719
{
5720
	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
5721 5722
}

5723
static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
5724
{
5725
	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
5726 5727
}

5728 5729 5730 5731 5732 5733 5734 5735 5736 5737
static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
}

5738 5739
static unsigned long emulator_get_cached_segment_base(
	struct x86_emulate_ctxt *ctxt, int seg)
5740
{
5741
	return get_segment_base(emul_to_vcpu(ctxt), seg);
5742 5743
}

5744 5745 5746
static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
				 struct desc_struct *desc, u32 *base3,
				 int seg)
5747 5748 5749
{
	struct kvm_segment var;

5750
	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
5751
	*selector = var.selector;
5752

5753 5754
	if (var.unusable) {
		memset(desc, 0, sizeof(*desc));
5755 5756
		if (base3)
			*base3 = 0;
5757
		return false;
5758
	}
5759 5760 5761 5762 5763

	if (var.g)
		var.limit >>= 12;
	set_desc_limit(desc, var.limit);
	set_desc_base(desc, (unsigned long)var.base);
5764 5765 5766 5767
#ifdef CONFIG_X86_64
	if (base3)
		*base3 = var.base >> 32;
#endif
5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779
	desc->type = var.type;
	desc->s = var.s;
	desc->dpl = var.dpl;
	desc->p = var.present;
	desc->avl = var.avl;
	desc->l = var.l;
	desc->d = var.db;
	desc->g = var.g;

	return true;
}

5780 5781 5782
static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
				 struct desc_struct *desc, u32 base3,
				 int seg)
5783
{
5784
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5785 5786
	struct kvm_segment var;

5787
	var.selector = selector;
5788
	var.base = get_desc_base(desc);
5789 5790 5791
#ifdef CONFIG_X86_64
	var.base |= ((u64)base3) << 32;
#endif
5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809
	var.limit = get_desc_limit(desc);
	if (desc->g)
		var.limit = (var.limit << 12) | 0xfff;
	var.type = desc->type;
	var.dpl = desc->dpl;
	var.db = desc->d;
	var.s = desc->s;
	var.l = desc->l;
	var.g = desc->g;
	var.avl = desc->avl;
	var.present = desc->p;
	var.unusable = !var.present;
	var.padding = 0;

	kvm_set_segment(vcpu, &var, seg);
	return;
}

5810 5811 5812
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 *pdata)
{
5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823
	struct msr_data msr;
	int r;

	msr.index = msr_index;
	msr.host_initiated = false;
	r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
	if (r)
		return r;

	*pdata = msr.data;
	return 0;
5824 5825 5826 5827 5828
}

static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 data)
{
5829 5830 5831 5832 5833 5834
	struct msr_data msr;

	msr.data = data;
	msr.index = msr_index;
	msr.host_initiated = false;
	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
5835 5836
}

P
Paolo Bonzini 已提交
5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850
static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	return vcpu->arch.smbase;
}

static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	vcpu->arch.smbase = smbase;
}

5851 5852 5853
static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
			      u32 pmc)
{
5854
	return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc);
5855 5856
}

5857 5858 5859
static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
			     u32 pmc, u64 *pdata)
{
5860
	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
5861 5862
}

5863 5864 5865 5866 5867
static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{
	emul_to_vcpu(ctxt)->arch.halt_request = 1;
}

5868
static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
5869
			      struct x86_instruction_info *info,
5870 5871
			      enum x86_intercept_stage stage)
{
5872
	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
5873 5874
}

5875 5876
static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
			u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool check_limit)
5877
{
5878
	return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, check_limit);
5879 5880
}

5881 5882 5883 5884 5885 5886 5887 5888 5889 5890
static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
	return kvm_register_read(emul_to_vcpu(ctxt), reg);
}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
}

5891 5892 5893 5894 5895
static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
{
	kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
}

5896 5897 5898 5899 5900 5901 5902 5903 5904 5905
static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
{
	return emul_to_vcpu(ctxt)->arch.hflags;
}

static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
{
	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
}

5906 5907 5908 5909 5910
static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
}

5911
static const struct x86_emulate_ops emulate_ops = {
5912 5913
	.read_gpr            = emulator_read_gpr,
	.write_gpr           = emulator_write_gpr,
5914 5915
	.read_std            = emulator_read_std,
	.write_std           = emulator_write_std,
5916
	.read_phys           = kvm_read_guest_phys_system,
5917
	.fetch               = kvm_fetch_guest_virt,
5918 5919 5920
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
5921
	.invlpg              = emulator_invlpg,
5922 5923
	.pio_in_emulated     = emulator_pio_in_emulated,
	.pio_out_emulated    = emulator_pio_out_emulated,
5924 5925
	.get_segment         = emulator_get_segment,
	.set_segment         = emulator_set_segment,
5926
	.get_cached_segment_base = emulator_get_cached_segment_base,
5927
	.get_gdt             = emulator_get_gdt,
5928
	.get_idt	     = emulator_get_idt,
5929 5930
	.set_gdt             = emulator_set_gdt,
	.set_idt	     = emulator_set_idt,
5931 5932
	.get_cr              = emulator_get_cr,
	.set_cr              = emulator_set_cr,
5933
	.cpl                 = emulator_get_cpl,
5934 5935
	.get_dr              = emulator_get_dr,
	.set_dr              = emulator_set_dr,
P
Paolo Bonzini 已提交
5936 5937
	.get_smbase          = emulator_get_smbase,
	.set_smbase          = emulator_set_smbase,
5938 5939
	.set_msr             = emulator_set_msr,
	.get_msr             = emulator_get_msr,
5940
	.check_pmc	     = emulator_check_pmc,
5941
	.read_pmc            = emulator_read_pmc,
5942
	.halt                = emulator_halt,
5943
	.wbinvd              = emulator_wbinvd,
5944
	.fix_hypercall       = emulator_fix_hypercall,
5945
	.intercept           = emulator_intercept,
5946
	.get_cpuid           = emulator_get_cpuid,
5947
	.set_nmi_mask        = emulator_set_nmi_mask,
5948 5949
	.get_hflags          = emulator_get_hflags,
	.set_hflags          = emulator_set_hflags,
5950
	.pre_leave_smm       = emulator_pre_leave_smm,
5951 5952
};

5953 5954
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
5955
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
5956 5957 5958 5959 5960 5961 5962
	/*
	 * an sti; sti; sequence only disable interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss
	 */
5963 5964
	if (int_shadow & mask)
		mask = 0;
5965
	if (unlikely(int_shadow || mask)) {
5966
		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
5967 5968 5969
		if (!mask)
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
5970 5971
}

5972
static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
5973 5974
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5975
	if (ctxt->exception.vector == PF_VECTOR)
5976 5977 5978
		return kvm_propagate_fault(vcpu, &ctxt->exception);

	if (ctxt->exception.error_code_valid)
5979 5980
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
5981
	else
5982
		kvm_queue_exception(vcpu, ctxt->exception.vector);
5983
	return false;
5984 5985
}

5986 5987
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
5988
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5989 5990 5991 5992
	int cs_db, cs_l;

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

5993
	ctxt->eflags = kvm_get_rflags(vcpu);
5994 5995
	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;

5996 5997 5998
	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
5999
		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
6000 6001
		     cs_db				? X86EMUL_MODE_PROT32 :
							  X86EMUL_MODE_PROT16;
6002
	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
P
Paolo Bonzini 已提交
6003 6004
	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
6005

6006
	init_decode_cache(ctxt);
6007
	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
6008 6009
}

6010
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
6011
{
6012
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
6013 6014 6015 6016
	int ret;

	init_emulate_ctxt(vcpu);

6017 6018 6019
	ctxt->op_bytes = 2;
	ctxt->ad_bytes = 2;
	ctxt->_eip = ctxt->eip + inc_eip;
6020
	ret = emulate_int_real(ctxt, irq);
6021 6022 6023 6024

	if (ret != X86EMUL_CONTINUE)
		return EMULATE_FAIL;

6025
	ctxt->eip = ctxt->_eip;
6026 6027
	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);
6028 6029 6030 6031 6032

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);

6033
static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
6034
{
6035 6036
	int r = EMULATE_DONE;

6037 6038
	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);
6039 6040 6041 6042

	if (emulation_type & EMULTYPE_NO_UD_ON_FAIL)
		return EMULATE_FAIL;

6043
	if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
6044 6045 6046
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
6047
		r = EMULATE_USER_EXIT;
6048
	}
6049

6050
	kvm_queue_exception(vcpu, UD_VECTOR);
6051 6052

	return r;
6053 6054
}

6055
static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
6056 6057
				  bool write_fault_to_shadow_pgtable,
				  int emulation_type)
6058
{
6059
	gpa_t gpa = cr2;
D
Dan Williams 已提交
6060
	kvm_pfn_t pfn;
6061

6062
	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
6063 6064
		return false;

6065 6066 6067
	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
		return false;

6068
	if (!vcpu->arch.mmu->direct_map) {
6069 6070 6071 6072 6073
		/*
		 * Write permission should be allowed since only
		 * write access need to be emulated.
		 */
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
6074

6075 6076 6077 6078 6079 6080 6081
		/*
		 * If the mapping is invalid in guest, let cpu retry
		 * it to generate fault.
		 */
		if (gpa == UNMAPPED_GVA)
			return true;
	}
6082

6083 6084 6085 6086 6087 6088 6089
	/*
	 * Do not retry the unhandleable instruction if it faults on the
	 * readonly host memory, otherwise it will goto a infinite loop:
	 * retry instruction -> write #PF -> emulation fail -> retry
	 * instruction -> ...
	 */
	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100

	/*
	 * If the instruction failed on the error pfn, it can not be fixed,
	 * report the error to userspace.
	 */
	if (is_error_noslot_pfn(pfn))
		return false;

	kvm_release_pfn_clean(pfn);

	/* The instructions are well-emulated on direct mmu. */
6101
	if (vcpu->arch.mmu->direct_map) {
6102 6103 6104 6105 6106 6107 6108 6109 6110
		unsigned int indirect_shadow_pages;

		spin_lock(&vcpu->kvm->mmu_lock);
		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
		spin_unlock(&vcpu->kvm->mmu_lock);

		if (indirect_shadow_pages)
			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

6111
		return true;
6112
	}
6113

6114 6115 6116 6117 6118 6119
	/*
	 * if emulation was due to access to shadowed page table
	 * and it failed try to unshadow page and re-enter the
	 * guest to let CPU execute the instruction.
	 */
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
6120 6121 6122 6123 6124 6125 6126

	/*
	 * If the access faults on its page table, it can not
	 * be fixed by unprotecting shadow page and it should
	 * be reported to userspace.
	 */
	return !write_fault_to_shadow_pgtable;
6127 6128
}

6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152
static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
			      unsigned long cr2,  int emulation_type)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;

	last_retry_eip = vcpu->arch.last_retry_eip;
	last_retry_addr = vcpu->arch.last_retry_addr;

	/*
	 * If the emulation is caused by #PF and it is non-page_table
	 * writing instruction, it means the VM-EXIT is caused by shadow
	 * page protected, we can zap the shadow page and retry this
	 * instruction directly.
	 *
	 * Note: if the guest uses a non-page-table modifying instruction
	 * on the PDE that points to the instruction, then we will unmap
	 * the instruction and go to an infinite loop. So, we cache the
	 * last retried eip and the last fault address, if we meet the eip
	 * and the address again, we can break out of the potential infinite
	 * loop.
	 */
	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;

6153
	if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
6154 6155
		return false;

6156 6157 6158
	if (WARN_ON_ONCE(is_guest_mode(vcpu)))
		return false;

6159 6160 6161 6162 6163 6164 6165 6166 6167
	if (x86_page_table_writing_insn(ctxt))
		return false;

	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
		return false;

	vcpu->arch.last_retry_eip = ctxt->eip;
	vcpu->arch.last_retry_addr = cr2;

6168
	if (!vcpu->arch.mmu->direct_map)
6169 6170
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

6171
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
6172 6173 6174 6175

	return true;
}

6176 6177 6178
static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);

P
Paolo Bonzini 已提交
6179
static void kvm_smm_changed(struct kvm_vcpu *vcpu)
6180
{
P
Paolo Bonzini 已提交
6181
	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
6182 6183 6184
		/* This is a good place to trace that we are exiting SMM.  */
		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);

6185 6186
		/* Process a latched INIT or SMI, if any.  */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
P
Paolo Bonzini 已提交
6187
	}
6188 6189

	kvm_mmu_reset_context(vcpu);
P
Paolo Bonzini 已提交
6190 6191 6192 6193 6194 6195
}

static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
{
	unsigned changed = vcpu->arch.hflags ^ emul_flags;

6196
	vcpu->arch.hflags = emul_flags;
P
Paolo Bonzini 已提交
6197 6198 6199

	if (changed & HF_SMM_MASK)
		kvm_smm_changed(vcpu);
6200 6201
}

6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216
static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
				unsigned long *db)
{
	u32 dr6 = 0;
	int i;
	u32 enable, rwlen;

	enable = dr7;
	rwlen = dr7 >> 16;
	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
			dr6 |= (1 << i);
	return dr6;
}

6217
static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
6218 6219 6220
{
	struct kvm_run *kvm_run = vcpu->run;

6221 6222 6223 6224 6225 6226 6227
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
		kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		*r = EMULATE_USER_EXIT;
	} else {
6228
		kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
6229 6230 6231
	}
}

6232 6233 6234 6235 6236 6237
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
	int r = EMULATE_DONE;

	kvm_x86_ops->skip_emulated_instruction(vcpu);
6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248

	/*
	 * rflags is the old, "raw" value of the flags.  The new value has
	 * not been saved yet.
	 *
	 * This is correct even for TF set by the guest, because "the
	 * processor will not generate this exception after the instruction
	 * that sets the TF flag".
	 */
	if (unlikely(rflags & X86_EFLAGS_TF))
		kvm_vcpu_do_singlestep(vcpu, &r);
6249 6250 6251 6252
	return r == EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);

6253 6254 6255 6256
static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
{
	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
6257 6258 6259
		struct kvm_run *kvm_run = vcpu->run;
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
6260 6261 6262 6263
					   vcpu->arch.guest_debug_dr7,
					   vcpu->arch.eff_db);

		if (dr6 != 0) {
6264
			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
6265
			kvm_run->debug.arch.pc = eip;
6266 6267 6268 6269 6270 6271 6272
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = EMULATE_USER_EXIT;
			return true;
		}
	}

6273 6274
	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
6275 6276
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
6277 6278 6279 6280 6281
					   vcpu->arch.dr7,
					   vcpu->arch.db);

		if (dr6 != 0) {
			vcpu->arch.dr6 &= ~15;
6282
			vcpu->arch.dr6 |= dr6 | DR6_RTM;
6283 6284 6285 6286 6287 6288 6289 6290 6291
			kvm_queue_exception(vcpu, DB_VECTOR);
			*r = EMULATE_DONE;
			return true;
		}
	}

	return false;
}

6292 6293
static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
{
6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317
	switch (ctxt->opcode_len) {
	case 1:
		switch (ctxt->b) {
		case 0xe4:	/* IN */
		case 0xe5:
		case 0xec:
		case 0xed:
		case 0xe6:	/* OUT */
		case 0xe7:
		case 0xee:
		case 0xef:
		case 0x6c:	/* INS */
		case 0x6d:
		case 0x6e:	/* OUTS */
		case 0x6f:
			return true;
		}
		break;
	case 2:
		switch (ctxt->b) {
		case 0x33:	/* RDPMC */
			return true;
		}
		break;
6318 6319 6320 6321 6322
	}

	return false;
}

6323 6324
int x86_emulate_instruction(struct kvm_vcpu *vcpu,
			    unsigned long cr2,
6325 6326 6327
			    int emulation_type,
			    void *insn,
			    int insn_len)
6328
{
6329
	int r;
6330
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
6331
	bool writeback = true;
6332
	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
6333

P
Paolo Bonzini 已提交
6334 6335
	vcpu->arch.l1tf_flush_l1d = true;

6336 6337 6338 6339 6340
	/*
	 * Clear write_fault_to_shadow_pgtable here to ensure it is
	 * never reused.
	 */
	vcpu->arch.write_fault_to_shadow_pgtable = false;
6341
	kvm_clear_exception_queue(vcpu);
G
Gleb Natapov 已提交
6342

6343
	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
6344
		init_emulate_ctxt(vcpu);
6345 6346 6347 6348 6349 6350 6351

		/*
		 * We will reenter on the same instruction since
		 * we do not set complete_userspace_io.  This does not
		 * handle watchpoints yet, those would be handled in
		 * the emulate_ops.
		 */
6352 6353
		if (!(emulation_type & EMULTYPE_SKIP) &&
		    kvm_vcpu_check_breakpoint(vcpu, &r))
6354 6355
			return r;

6356 6357
		ctxt->interruptibility = 0;
		ctxt->have_exception = false;
6358
		ctxt->exception.vector = -1;
6359
		ctxt->perm_ok = false;
6360

6361
		ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
6362

6363
		r = x86_decode_insn(ctxt, insn, insn_len);
6364

A
Avi Kivity 已提交
6365
		trace_kvm_emulate_insn_start(vcpu);
6366
		++vcpu->stat.insn_emulation;
6367
		if (r != EMULATION_OK)  {
6368 6369
			if (emulation_type & EMULTYPE_TRAP_UD)
				return EMULATE_FAIL;
6370 6371
			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
						emulation_type))
6372
				return EMULATE_DONE;
6373 6374
			if (ctxt->have_exception && inject_emulated_exception(vcpu))
				return EMULATE_DONE;
6375 6376
			if (emulation_type & EMULTYPE_SKIP)
				return EMULATE_FAIL;
6377
			return handle_emulation_failure(vcpu, emulation_type);
6378 6379 6380
		}
	}

6381 6382 6383 6384
	if ((emulation_type & EMULTYPE_VMWARE) &&
	    !is_vmware_backdoor_opcode(ctxt))
		return EMULATE_FAIL;

6385
	if (emulation_type & EMULTYPE_SKIP) {
6386
		kvm_rip_write(vcpu, ctxt->_eip);
6387 6388
		if (ctxt->eflags & X86_EFLAGS_RF)
			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
6389 6390 6391
		return EMULATE_DONE;
	}

6392 6393 6394
	if (retry_instruction(ctxt, cr2, emulation_type))
		return EMULATE_DONE;

6395
	/* this is needed for vmware backdoor interface to work since it
6396
	   changes registers values  during IO operation */
6397 6398
	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
6399
		emulator_invalidate_register_cache(ctxt);
6400
	}
6401

6402
restart:
6403 6404 6405
	/* Save the faulting GPA (cr2) in the address field */
	ctxt->exception.address = cr2;

6406
	r = x86_emulate_insn(ctxt);
6407

6408 6409 6410
	if (r == EMULATION_INTERCEPTED)
		return EMULATE_DONE;

6411
	if (r == EMULATION_FAILED) {
6412 6413
		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
					emulation_type))
6414 6415
			return EMULATE_DONE;

6416
		return handle_emulation_failure(vcpu, emulation_type);
6417 6418
	}

6419
	if (ctxt->have_exception) {
6420
		r = EMULATE_DONE;
6421 6422
		if (inject_emulated_exception(vcpu))
			return r;
6423
	} else if (vcpu->arch.pio.count) {
6424 6425
		if (!vcpu->arch.pio.in) {
			/* FIXME: return into emulator if single-stepping.  */
6426
			vcpu->arch.pio.count = 0;
6427
		} else {
6428
			writeback = false;
6429 6430
			vcpu->arch.complete_userspace_io = complete_emulated_pio;
		}
P
Paolo Bonzini 已提交
6431
		r = EMULATE_USER_EXIT;
6432 6433 6434
	} else if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			writeback = false;
P
Paolo Bonzini 已提交
6435
		r = EMULATE_USER_EXIT;
6436
		vcpu->arch.complete_userspace_io = complete_emulated_mmio;
6437
	} else if (r == EMULATION_RESTART)
6438
		goto restart;
6439 6440
	else
		r = EMULATE_DONE;
6441

6442
	if (writeback) {
6443
		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
6444
		toggle_interruptibility(vcpu, ctxt->interruptibility);
6445
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6446
		kvm_rip_write(vcpu, ctxt->eip);
6447 6448 6449
		if (r == EMULATE_DONE &&
		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
			kvm_vcpu_do_singlestep(vcpu, &r);
6450 6451 6452
		if (!ctxt->have_exception ||
		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
			__kvm_set_rflags(vcpu, ctxt->eflags);
6453 6454 6455 6456 6457 6458 6459 6460 6461

		/*
		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
		 * do nothing, and it will be requested again as soon as
		 * the shadow expires.  But we still need to check here,
		 * because POPF has no interrupt shadow.
		 */
		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
			kvm_make_request(KVM_REQ_EVENT, vcpu);
6462 6463
	} else
		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
6464 6465

	return r;
6466
}
6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479

int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
EXPORT_SYMBOL_GPL(kvm_emulate_instruction);

int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len)
{
	return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
}
EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
6480

6481 6482
static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
			    unsigned short port)
6483
{
6484
	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
6485 6486
	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
					    size, port, &val, 1);
6487
	/* do not return to emulator after return from userspace */
6488
	vcpu->arch.pio.count = 0;
6489 6490 6491
	return ret;
}

6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513
static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
{
	unsigned long val;

	/* We should only ever be called with arch.pio.count equal to 1 */
	BUG_ON(vcpu->arch.pio.count != 1);

	/* For size less than 4 we merge, else we zero extend */
	val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
					: 0;

	/*
	 * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
	 * the copy and tracing
	 */
	emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
				 vcpu->arch.pio.port, &val, 1);
	kvm_register_write(vcpu, VCPU_REGS_RAX, val);

	return 1;
}

6514 6515
static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
			   unsigned short port)
6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533
{
	unsigned long val;
	int ret;

	/* For size less than 4 we merge, else we zero extend */
	val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;

	ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
				       &val, 1);
	if (ret) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, val);
		return ret;
	}

	vcpu->arch.complete_userspace_io = complete_fast_pio_in;

	return 0;
}
6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
{
	int ret = kvm_skip_emulated_instruction(vcpu);

	/*
	 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
	 * KVM_EXIT_DEBUG here.
	 */
	if (in)
		return kvm_fast_pio_in(vcpu, size, port) && ret;
	else
		return kvm_fast_pio_out(vcpu, size, port) && ret;
}
EXPORT_SYMBOL_GPL(kvm_fast_pio);
6549

6550
static int kvmclock_cpu_down_prep(unsigned int cpu)
6551
{
T
Tejun Heo 已提交
6552
	__this_cpu_write(cpu_tsc_khz, 0);
6553
	return 0;
6554 6555 6556
}

static void tsc_khz_changed(void *data)
6557
{
6558 6559 6560 6561 6562 6563 6564 6565 6566
	struct cpufreq_freqs *freq = data;
	unsigned long khz = 0;

	if (data)
		khz = freq->new;
	else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		khz = cpufreq_quick_get(raw_smp_processor_id());
	if (!khz)
		khz = tsc_khz;
T
Tejun Heo 已提交
6567
	__this_cpu_write(cpu_tsc_khz, khz);
6568 6569
}

6570
#ifdef CONFIG_X86_64
6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604
static void kvm_hyperv_tsc_notifier(void)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int cpu;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_make_mclock_inprogress_request(kvm);

	hyperv_stop_tsc_emulation();

	/* TSC frequency always matches when on Hyper-V */
	for_each_present_cpu(cpu)
		per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
	kvm_max_guest_tsc_khz = tsc_khz;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		struct kvm_arch *ka = &kvm->arch;

		spin_lock(&ka->pvclock_gtod_sync_lock);

		pvclock_update_vm_gtod_copy(kvm);

		kvm_for_each_vcpu(cpu, vcpu, kvm)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

		kvm_for_each_vcpu(cpu, vcpu, kvm)
			kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);

		spin_unlock(&ka->pvclock_gtod_sync_lock);
	}
	spin_unlock(&kvm_lock);
}
6605
#endif
6606

6607 6608 6609 6610 6611 6612 6613 6614
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i, send_ipi = 0;

6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653
	/*
	 * We allow guests to temporarily run on slowing clocks,
	 * provided we notify them after, or to run on accelerating
	 * clocks, provided we notify them before.  Thus time never
	 * goes backwards.
	 *
	 * However, we have a problem.  We can't atomically update
	 * the frequency of a given CPU from this function; it is
	 * merely a notifier, which can be called from any CPU.
	 * Changing the TSC frequency at arbitrary points in time
	 * requires a recomputation of local variables related to
	 * the TSC for each VCPU.  We must flag these local variables
	 * to be updated and be sure the update takes place with the
	 * new frequency before any guests proceed.
	 *
	 * Unfortunately, the combination of hotplug CPU and frequency
	 * change creates an intractable locking scenario; the order
	 * of when these callouts happen is undefined with respect to
	 * CPU hotplug, and they can race with each other.  As such,
	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
	 * undefined; you can actually have a CPU frequency change take
	 * place in between the computation of X and the setting of the
	 * variable.  To protect against this problem, all updates of
	 * the per_cpu tsc_khz variable are done in an interrupt
	 * protected IPI, and all callers wishing to update the value
	 * must wait for a synchronous IPI to complete (which is trivial
	 * if the caller is on the CPU already).  This establishes the
	 * necessary total order on variable updates.
	 *
	 * Note that because a guest time update may take place
	 * anytime after the setting of the VCPU's request bit, the
	 * correct TSC value must be set before the request.  However,
	 * to ensure the update actually makes it to any guest which
	 * starts running in hardware virtualization between the set
	 * and the acquisition of the spinlock, we must also ping the
	 * CPU after setting the request bit.
	 *
	 */

6654 6655 6656 6657
	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
		return 0;
	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
		return 0;
6658 6659

	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
6660

6661
	spin_lock(&kvm_lock);
6662
	list_for_each_entry(kvm, &vm_list, vm_list) {
6663
		kvm_for_each_vcpu(i, vcpu, kvm) {
6664 6665
			if (vcpu->cpu != freq->cpu)
				continue;
Z
Zachary Amsden 已提交
6666
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
6667
			if (vcpu->cpu != smp_processor_id())
6668
				send_ipi = 1;
6669 6670
		}
	}
6671
	spin_unlock(&kvm_lock);
6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685

	if (freq->old < freq->new && send_ipi) {
		/*
		 * We upscale the frequency.  Must make the guest
		 * doesn't see old kvmclock values while running with
		 * the new frequency, otherwise we risk the guest sees
		 * time go backwards.
		 *
		 * In case we update the frequency for another cpu
		 * (which might be in guest context) send an interrupt
		 * to kick the cpu out of guest context.  Next time
		 * guest context is entered kvmclock will be updated,
		 * so the guest will not see stale values.
		 */
6686
		smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
6687 6688 6689 6690 6691
	}
	return 0;
}

static struct notifier_block kvmclock_cpufreq_notifier_block = {
6692 6693 6694
	.notifier_call  = kvmclock_cpufreq_notifier
};

6695
static int kvmclock_cpu_online(unsigned int cpu)
6696
{
6697 6698
	tsc_khz_changed(NULL);
	return 0;
6699 6700
}

6701 6702
static void kvm_timer_init(void)
{
Z
Zachary Amsden 已提交
6703
	max_tsc_khz = tsc_khz;
6704

6705
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
Z
Zachary Amsden 已提交
6706 6707
#ifdef CONFIG_CPU_FREQ
		struct cpufreq_policy policy;
6708 6709
		int cpu;

Z
Zachary Amsden 已提交
6710
		memset(&policy, 0, sizeof(policy));
6711 6712
		cpu = get_cpu();
		cpufreq_get_policy(&policy, cpu);
Z
Zachary Amsden 已提交
6713 6714
		if (policy.cpuinfo.max_freq)
			max_tsc_khz = policy.cpuinfo.max_freq;
6715
		put_cpu();
Z
Zachary Amsden 已提交
6716
#endif
6717 6718 6719
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}
Z
Zachary Amsden 已提交
6720
	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
6721

T
Thomas Gleixner 已提交
6722
	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
6723
			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
6724 6725
}

6726 6727
DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
EXPORT_PER_CPU_SYMBOL_GPL(current_vcpu);
6728

6729
int kvm_is_in_guest(void)
6730
{
6731
	return __this_cpu_read(current_vcpu) != NULL;
6732 6733 6734 6735 6736
}

static int kvm_is_user_mode(void)
{
	int user_mode = 3;
6737

6738 6739
	if (__this_cpu_read(current_vcpu))
		user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
6740

6741 6742 6743 6744 6745 6746
	return user_mode != 0;
}

static unsigned long kvm_get_guest_ip(void)
{
	unsigned long ip = 0;
6747

6748 6749
	if (__this_cpu_read(current_vcpu))
		ip = kvm_rip_read(__this_cpu_read(current_vcpu));
6750

6751 6752 6753 6754 6755 6756 6757 6758 6759
	return ip;
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.is_in_guest		= kvm_is_in_guest,
	.is_user_mode		= kvm_is_user_mode,
	.get_guest_ip		= kvm_get_guest_ip,
};

6760 6761 6762 6763 6764 6765 6766 6767 6768
static void kvm_set_mmio_spte_mask(void)
{
	u64 mask;
	int maxphyaddr = boot_cpu_data.x86_phys_bits;

	/*
	 * Set the reserved bits and the present bit of an paging-structure
	 * entry to generate page fault with PFER.RSV = 1.
	 */
6769 6770 6771 6772 6773 6774

	/*
	 * Mask the uppermost physical address bit, which would be reserved as
	 * long as the supported physical address width is less than 52.
	 */
	mask = 1ull << 51;
6775 6776

	/* Set the present bit. */
6777 6778 6779 6780 6781 6782
	mask |= 1ull;

	/*
	 * If reserved bit is not supported, clear the present bit to disable
	 * mmio page fault.
	 */
6783
	if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
6784 6785
		mask &= ~1ull;

6786
	kvm_mmu_set_mmio_spte_mask(mask, mask);
6787 6788
}

6789 6790 6791
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
6792 6793 6794 6795 6796
	struct kvm *kvm;

	struct kvm_vcpu *vcpu;
	int i;

6797
	spin_lock(&kvm_lock);
6798 6799
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
6800
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
6801
	atomic_set(&kvm_guest_has_master_clock, 0);
6802
	spin_unlock(&kvm_lock);
6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818
}

static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);

/*
 * Notification about pvclock gtod data update.
 */
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
			       void *priv)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	struct timekeeper *tk = priv;

	update_pvclock_gtod(tk);

	/* disable master clock if host does not trust, or does not
6819
	 * use, TSC based clocksource.
6820
	 */
6821
	if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832
	    atomic_read(&kvm_guest_has_master_clock) != 0)
		queue_work(system_long_wq, &pvclock_gtod_work);

	return 0;
}

static struct notifier_block pvclock_gtod_notifier = {
	.notifier_call = pvclock_gtod_notify,
};
#endif

6833
int kvm_arch_init(void *opaque)
6834
{
6835
	int r;
M
Mathias Krause 已提交
6836
	struct kvm_x86_ops *ops = opaque;
6837 6838 6839

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
6840 6841
		r = -EEXIST;
		goto out;
6842 6843 6844 6845
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
6846 6847
		r = -EOPNOTSUPP;
		goto out;
6848 6849 6850
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
6851 6852
		r = -EOPNOTSUPP;
		goto out;
6853 6854
	}

6855 6856 6857 6858 6859 6860 6861
	r = -ENOMEM;
	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
	if (!shared_msrs) {
		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
		goto out;
	}

6862 6863
	r = kvm_mmu_module_init();
	if (r)
6864
		goto out_free_percpu;
6865

6866
	kvm_set_mmio_spte_mask();
6867

6868
	kvm_x86_ops = ops;
P
Paolo Bonzini 已提交
6869

S
Sheng Yang 已提交
6870
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
6871
			PT_DIRTY_MASK, PT64_NX_MASK, 0,
6872
			PT_PRESENT_MASK, 0, sme_me_mask);
6873
	kvm_timer_init();
6874

6875 6876
	perf_register_guest_info_callbacks(&kvm_guest_cbs);

6877
	if (boot_cpu_has(X86_FEATURE_XSAVE))
6878 6879
		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

6880
	kvm_lapic_init();
6881 6882
#ifdef CONFIG_X86_64
	pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
6883

6884
	if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
6885
		set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
6886 6887
#endif

6888
	return 0;
6889

6890 6891
out_free_percpu:
	free_percpu(shared_msrs);
6892 6893
out:
	return r;
6894
}
6895

6896 6897
void kvm_arch_exit(void)
{
6898
#ifdef CONFIG_X86_64
6899
	if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
6900 6901
		clear_hv_tscchange_cb();
#endif
6902
	kvm_lapic_exit();
6903 6904
	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);

6905 6906 6907
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
6908
	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
6909 6910 6911
#ifdef CONFIG_X86_64
	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
#endif
6912
	kvm_x86_ops = NULL;
6913
	kvm_mmu_module_exit();
6914
	free_percpu(shared_msrs);
6915
}
6916

6917
int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
6918 6919
{
	++vcpu->stat.halt_exits;
6920
	if (lapic_in_kernel(vcpu)) {
6921
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
6922 6923 6924 6925 6926 6927
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
6928 6929 6930 6931
EXPORT_SYMBOL_GPL(kvm_vcpu_halt);

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
6932 6933 6934 6935 6936 6937
	int ret = kvm_skip_emulated_instruction(vcpu);
	/*
	 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
	 * KVM_EXIT_DEBUG here.
	 */
	return kvm_vcpu_halt(vcpu) && ret;
6938
}
6939 6940
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

6941
#ifdef CONFIG_X86_64
6942 6943 6944 6945
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
			        unsigned long clock_type)
{
	struct kvm_clock_pairing clock_pairing;
6946
	struct timespec64 ts;
P
Paolo Bonzini 已提交
6947
	u64 cycle;
6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959
	int ret;

	if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
		return -KVM_EOPNOTSUPP;

	if (kvm_get_walltime_and_clockread(&ts, &cycle) == false)
		return -KVM_EOPNOTSUPP;

	clock_pairing.sec = ts.tv_sec;
	clock_pairing.nsec = ts.tv_nsec;
	clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
	clock_pairing.flags = 0;
6960
	memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
6961 6962 6963 6964 6965 6966 6967 6968

	ret = 0;
	if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
			    sizeof(struct kvm_clock_pairing)))
		ret = -KVM_EFAULT;

	return ret;
}
6969
#endif
6970

6971 6972 6973 6974 6975 6976 6977
/*
 * kvm_pv_kick_cpu_op:  Kick a vcpu.
 *
 * @apicid - apicid of vcpu to be kicked.
 */
static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
{
6978
	struct kvm_lapic_irq lapic_irq;
6979

6980 6981
	lapic_irq.shorthand = 0;
	lapic_irq.dest_mode = 0;
6982
	lapic_irq.level = 0;
6983
	lapic_irq.dest_id = apicid;
6984
	lapic_irq.msi_redir_hint = false;
6985

6986
	lapic_irq.delivery_mode = APIC_DM_REMRD;
6987
	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
6988 6989
}

6990 6991 6992 6993 6994 6995
void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
{
	vcpu->arch.apicv_active = false;
	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
}

6996 6997 6998
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
6999
	int op_64_bit;
7000

7001 7002
	if (kvm_hv_hypercall_enabled(vcpu->kvm))
		return kvm_hv_hypercall(vcpu);
7003

7004 7005 7006 7007 7008
	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
7009

7010
	trace_kvm_hypercall(nr, a0, a1, a2, a3);
F
Feng (Eric) Liu 已提交
7011

7012 7013
	op_64_bit = is_64_bit_mode(vcpu);
	if (!op_64_bit) {
7014 7015 7016 7017 7018 7019 7020
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

7021 7022
	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
		ret = -KVM_EPERM;
7023
		goto out;
7024 7025
	}

7026
	switch (nr) {
A
Avi Kivity 已提交
7027 7028 7029
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
7030 7031 7032 7033
	case KVM_HC_KICK_CPU:
		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
		ret = 0;
		break;
7034
#ifdef CONFIG_X86_64
7035 7036 7037
	case KVM_HC_CLOCK_PAIRING:
		ret = kvm_pv_clock_pairing(vcpu, a0, a1);
		break;
7038 7039 7040
	case KVM_HC_SEND_IPI:
		ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
		break;
7041
#endif
7042 7043 7044 7045
	default:
		ret = -KVM_ENOSYS;
		break;
	}
7046
out:
7047 7048
	if (!op_64_bit)
		ret = (u32)ret;
7049
	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
7050

A
Amit Shah 已提交
7051
	++vcpu->stat.hypercalls;
7052
	return kvm_skip_emulated_instruction(vcpu);
7053 7054 7055
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);

7056
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
7057
{
7058
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7059
	char instruction[3];
7060
	unsigned long rip = kvm_rip_read(vcpu);
7061 7062 7063

	kvm_x86_ops->patch_hypercall(vcpu, instruction);

7064 7065
	return emulator_write_emulated(ctxt, rip, instruction, 3,
		&ctxt->exception);
7066 7067
}

A
Avi Kivity 已提交
7068
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
7069
{
7070 7071
	return vcpu->run->request_interrupt_window &&
		likely(!pic_in_kernel(vcpu->kvm));
7072 7073
}

A
Avi Kivity 已提交
7074
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
7075
{
A
Avi Kivity 已提交
7076 7077
	struct kvm_run *kvm_run = vcpu->run;

7078
	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
7079
	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
7080
	kvm_run->cr8 = kvm_get_cr8(vcpu);
7081
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
7082 7083
	kvm_run->ready_for_interrupt_injection =
		pic_in_kernel(vcpu->kvm) ||
7084
		kvm_vcpu_ready_for_interrupt_injection(vcpu);
7085 7086
}

7087 7088 7089 7090 7091 7092 7093
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!kvm_x86_ops->update_cr8_intercept)
		return;

7094
	if (!lapic_in_kernel(vcpu))
7095 7096
		return;

7097 7098 7099
	if (vcpu->arch.apicv_active)
		return;

7100 7101 7102 7103
	if (!vcpu->arch.apic->vapic_addr)
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	else
		max_irr = -1;
7104 7105 7106 7107 7108 7109 7110 7111 7112

	if (max_irr != -1)
		max_irr >>= 4;

	tpr = kvm_lapic_get_cr8(vcpu);

	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}

7113
static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
7114
{
7115 7116
	int r;

7117
	/* try to reinject previous events if any */
7118

7119 7120
	if (vcpu->arch.exception.injected)
		kvm_x86_ops->queue_exception(vcpu);
7121
	/*
7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133
	 * Do not inject an NMI or interrupt if there is a pending
	 * exception.  Exceptions and interrupts are recognized at
	 * instruction boundaries, i.e. the start of an instruction.
	 * Trap-like exceptions, e.g. #DB, have higher priority than
	 * NMIs and interrupts, i.e. traps are recognized before an
	 * NMI/interrupt that's pending on the same instruction.
	 * Fault-like exceptions, e.g. #GP and #PF, are the lowest
	 * priority, but are only generated (pended) during instruction
	 * execution, i.e. a pending fault-like exception means the
	 * fault occurred on the *previous* instruction and must be
	 * serviced prior to recognizing any new events in order to
	 * fully complete the previous instruction.
7134
	 */
7135 7136
	else if (!vcpu->arch.exception.pending) {
		if (vcpu->arch.nmi_injected)
7137
			kvm_x86_ops->set_nmi(vcpu);
7138
		else if (vcpu->arch.interrupt.injected)
7139 7140 7141
			kvm_x86_ops->set_irq(vcpu);
	}

7142 7143 7144 7145 7146 7147
	/*
	 * Call check_nested_events() even if we reinjected a previous event
	 * in order for caller to determine if it should require immediate-exit
	 * from L2 to L1 due to pending L1 events which require exit
	 * from L2 to L1.
	 */
7148 7149 7150 7151 7152 7153 7154
	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
		if (r != 0)
			return r;
	}

	/* try to inject new event if pending */
7155
	if (vcpu->arch.exception.pending) {
A
Avi Kivity 已提交
7156 7157 7158
		trace_kvm_inj_exception(vcpu->arch.exception.nr,
					vcpu->arch.exception.has_error_code,
					vcpu->arch.exception.error_code);
7159

7160
		WARN_ON_ONCE(vcpu->arch.exception.injected);
7161 7162 7163
		vcpu->arch.exception.pending = false;
		vcpu->arch.exception.injected = true;

7164 7165 7166 7167
		if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
					     X86_EFLAGS_RF);

7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183
		if (vcpu->arch.exception.nr == DB_VECTOR) {
			/*
			 * This code assumes that nSVM doesn't use
			 * check_nested_events(). If it does, the
			 * DR6/DR7 changes should happen before L1
			 * gets a #VMEXIT for an intercepted #DB in
			 * L2.  (Under VMX, on the other hand, the
			 * DR6/DR7 changes should not happen in the
			 * event of a VM-exit to L1 for an intercepted
			 * #DB in L2.)
			 */
			kvm_deliver_exception_payload(vcpu);
			if (vcpu->arch.dr7 & DR7_GD) {
				vcpu->arch.dr7 &= ~DR7_GD;
				kvm_update_dr7(vcpu);
			}
7184 7185
		}

7186
		kvm_x86_ops->queue_exception(vcpu);
7187 7188 7189 7190 7191 7192 7193 7194
	}

	/* Don't consider new event if we re-injected an event */
	if (kvm_event_needs_reinjection(vcpu))
		return 0;

	if (vcpu->arch.smi_pending && !is_smm(vcpu) &&
	    kvm_x86_ops->smi_allowed(vcpu)) {
7195
		vcpu->arch.smi_pending = false;
7196
		++vcpu->arch.smi_count;
7197
		enter_smm(vcpu);
7198
	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
7199 7200 7201
		--vcpu->arch.nmi_pending;
		vcpu->arch.nmi_injected = true;
		kvm_x86_ops->set_nmi(vcpu);
7202
	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214
		/*
		 * Because interrupts can be injected asynchronously, we are
		 * calling check_nested_events again here to avoid a race condition.
		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
		 * proposal and current concerns.  Perhaps we should be setting
		 * KVM_REQ_EVENT only on certain events and not unconditionally?
		 */
		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
			if (r != 0)
				return r;
		}
7215
		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
7216 7217 7218
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
					    false);
			kvm_x86_ops->set_irq(vcpu);
7219 7220
		}
	}
7221

7222
	return 0;
7223 7224
}

A
Avi Kivity 已提交
7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241
static void process_nmi(struct kvm_vcpu *vcpu)
{
	unsigned limit = 2;

	/*
	 * x86 is limited to one NMI running, and one NMI pending after it.
	 * If an NMI is already in progress, limit further NMIs to just one.
	 * Otherwise, allow two (and we'll inject the first one immediately).
	 */
	if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
		limit = 1;

	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}

7242
static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255
{
	u32 flags = 0;
	flags |= seg->g       << 23;
	flags |= seg->db      << 22;
	flags |= seg->l       << 21;
	flags |= seg->avl     << 20;
	flags |= seg->present << 15;
	flags |= seg->dpl     << 13;
	flags |= seg->s       << 12;
	flags |= seg->type    << 8;
	return flags;
}

7256
static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270
{
	struct kvm_segment seg;
	int offset;

	kvm_get_segment(vcpu, &seg, n);
	put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	put_smstate(u32, buf, offset + 8, seg.base);
	put_smstate(u32, buf, offset + 4, seg.limit);
7271
	put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
7272 7273
}

7274
#ifdef CONFIG_X86_64
7275
static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
7276 7277 7278 7279 7280 7281 7282 7283
{
	struct kvm_segment seg;
	int offset;
	u16 flags;

	kvm_get_segment(vcpu, &seg, n);
	offset = 0x7e00 + n * 16;

7284
	flags = enter_smm_get_segment_flags(&seg) >> 8;
7285 7286 7287 7288 7289
	put_smstate(u16, buf, offset, seg.selector);
	put_smstate(u16, buf, offset + 2, flags);
	put_smstate(u32, buf, offset + 4, seg.limit);
	put_smstate(u64, buf, offset + 8, seg.base);
}
7290
#endif
7291

7292
static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315
{
	struct desc_ptr dt;
	struct kvm_segment seg;
	unsigned long val;
	int i;

	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
	put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
	put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));

	for (i = 0; i < 8; i++)
		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));

	kvm_get_dr(vcpu, 6, &val);
	put_smstate(u32, buf, 0x7fcc, (u32)val);
	kvm_get_dr(vcpu, 7, &val);
	put_smstate(u32, buf, 0x7fc8, (u32)val);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
	put_smstate(u32, buf, 0x7fc4, seg.selector);
	put_smstate(u32, buf, 0x7f64, seg.base);
	put_smstate(u32, buf, 0x7f60, seg.limit);
7316
	put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
7317 7318 7319 7320 7321

	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
	put_smstate(u32, buf, 0x7fc0, seg.selector);
	put_smstate(u32, buf, 0x7f80, seg.base);
	put_smstate(u32, buf, 0x7f7c, seg.limit);
7322
	put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
7323 7324 7325 7326 7327 7328 7329 7330 7331 7332

	kvm_x86_ops->get_gdt(vcpu, &dt);
	put_smstate(u32, buf, 0x7f74, dt.address);
	put_smstate(u32, buf, 0x7f70, dt.size);

	kvm_x86_ops->get_idt(vcpu, &dt);
	put_smstate(u32, buf, 0x7f58, dt.address);
	put_smstate(u32, buf, 0x7f54, dt.size);

	for (i = 0; i < 6; i++)
7333
		enter_smm_save_seg_32(vcpu, buf, i);
7334 7335 7336 7337 7338 7339 7340 7341

	put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));

	/* revision id */
	put_smstate(u32, buf, 0x7efc, 0x00020000);
	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
}

7342
static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373
{
#ifdef CONFIG_X86_64
	struct desc_ptr dt;
	struct kvm_segment seg;
	unsigned long val;
	int i;

	for (i = 0; i < 16; i++)
		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));

	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));

	kvm_get_dr(vcpu, 6, &val);
	put_smstate(u64, buf, 0x7f68, val);
	kvm_get_dr(vcpu, 7, &val);
	put_smstate(u64, buf, 0x7f60, val);

	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));

	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);

	/* revision id */
	put_smstate(u32, buf, 0x7efc, 0x00020064);

	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
	put_smstate(u16, buf, 0x7e90, seg.selector);
7374
	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
7375 7376 7377 7378 7379 7380 7381 7382 7383
	put_smstate(u32, buf, 0x7e94, seg.limit);
	put_smstate(u64, buf, 0x7e98, seg.base);

	kvm_x86_ops->get_idt(vcpu, &dt);
	put_smstate(u32, buf, 0x7e84, dt.size);
	put_smstate(u64, buf, 0x7e88, dt.address);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
	put_smstate(u16, buf, 0x7e70, seg.selector);
7384
	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
7385 7386 7387 7388 7389 7390 7391 7392
	put_smstate(u32, buf, 0x7e74, seg.limit);
	put_smstate(u64, buf, 0x7e78, seg.base);

	kvm_x86_ops->get_gdt(vcpu, &dt);
	put_smstate(u32, buf, 0x7e64, dt.size);
	put_smstate(u64, buf, 0x7e68, dt.address);

	for (i = 0; i < 6; i++)
7393
		enter_smm_save_seg_64(vcpu, buf, i);
7394 7395 7396 7397 7398
#else
	WARN_ON_ONCE(1);
#endif
}

7399
static void enter_smm(struct kvm_vcpu *vcpu)
P
Paolo Bonzini 已提交
7400
{
7401
	struct kvm_segment cs, ds;
7402
	struct desc_ptr dt;
7403 7404 7405 7406 7407
	char buf[512];
	u32 cr0;

	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
	memset(buf, 0, 512);
7408
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
7409
		enter_smm_save_state_64(vcpu, buf);
7410
	else
7411
		enter_smm_save_state_32(vcpu, buf);
7412

7413 7414 7415 7416 7417 7418 7419 7420
	/*
	 * Give pre_enter_smm() a chance to make ISA-specific changes to the
	 * vCPU state (e.g. leave guest mode) after we've saved the state into
	 * the SMM state-save area.
	 */
	kvm_x86_ops->pre_enter_smm(vcpu, buf);

	vcpu->arch.hflags |= HF_SMM_MASK;
7421
	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436

	if (kvm_x86_ops->get_nmi_mask(vcpu))
		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
	else
		kvm_x86_ops->set_nmi_mask(vcpu, true);

	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
	kvm_rip_write(vcpu, 0x8000);

	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_x86_ops->set_cr4(vcpu, 0);

7437 7438 7439 7440
	/* Undocumented: IDT limit is set to zero on entry to SMM.  */
	dt.address = dt.size = 0;
	kvm_x86_ops->set_idt(vcpu, &dt);

7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467
	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);

	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
	cs.base = vcpu->arch.smbase;

	ds.selector = 0;
	ds.base = 0;

	cs.limit    = ds.limit = 0xffffffff;
	cs.type     = ds.type = 0x3;
	cs.dpl      = ds.dpl = 0;
	cs.db       = ds.db = 0;
	cs.s        = ds.s = 1;
	cs.l        = ds.l = 0;
	cs.g        = ds.g = 1;
	cs.avl      = ds.avl = 0;
	cs.present  = ds.present = 1;
	cs.unusable = ds.unusable = 0;
	cs.padding  = ds.padding = 0;

	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);

7468
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
7469 7470 7471 7472
		kvm_x86_ops->set_efer(vcpu, 0);

	kvm_update_cpuid(vcpu);
	kvm_mmu_reset_context(vcpu);
P
Paolo Bonzini 已提交
7473 7474
}

7475
static void process_smi(struct kvm_vcpu *vcpu)
7476 7477 7478 7479 7480
{
	vcpu->arch.smi_pending = true;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}

7481 7482 7483 7484 7485
void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

7486
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
7487
{
7488 7489
	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return;
7490

7491
	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
7492

7493
	if (irqchip_split(vcpu->kvm))
7494
		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
7495
	else {
7496
		if (vcpu->arch.apicv_active)
7497
			kvm_x86_ops->sync_pir_to_irr(vcpu);
7498 7499
		if (ioapic_in_kernel(vcpu->kvm))
			kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
7500
	}
7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514

	if (is_guest_mode(vcpu))
		vcpu->arch.load_eoi_exitmap_pending = true;
	else
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
}

static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{
	u64 eoi_exit_bitmap[4];

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return;

7515 7516 7517
	bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
		  vcpu_to_synic(vcpu)->vec_bitmap, 256);
	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
7518 7519
}

7520 7521 7522
int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
		unsigned long start, unsigned long end,
		bool blockable)
7523 7524 7525 7526 7527 7528 7529 7530 7531 7532
{
	unsigned long apic_address;

	/*
	 * The physical address of apic access page is stored in the VMCS.
	 * Update it when it becomes invalid.
	 */
	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (start <= apic_address && apic_address < end)
		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
7533 7534

	return 0;
7535 7536
}

7537 7538
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
7539 7540
	struct page *page = NULL;

7541
	if (!lapic_in_kernel(vcpu))
7542 7543
		return;

7544 7545 7546
	if (!kvm_x86_ops->set_apic_access_page_addr)
		return;

7547
	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
7548 7549
	if (is_error_page(page))
		return;
7550 7551 7552 7553 7554 7555 7556
	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));

	/*
	 * Do not pin apic access page in memory, the MMU notifier
	 * will call us again if it is migrated or swapped out.
	 */
	put_page(page);
7557 7558 7559
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);

7560 7561 7562 7563 7564 7565
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
{
	smp_send_reschedule(vcpu->cpu);
}
EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);

7566
/*
7567
 * Returns 1 to let vcpu_run() continue the guest execution loop without
7568 7569 7570
 * exiting to the userspace.  Otherwise, the value will be returned to the
 * userspace.
 */
A
Avi Kivity 已提交
7571
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
7572 7573
{
	int r;
7574 7575 7576 7577
	bool req_int_win =
		dm_request_for_irq_injection(vcpu) &&
		kvm_cpu_accept_dm_intr(vcpu);

7578
	bool req_immediate_exit = false;
7579

R
Radim Krčmář 已提交
7580
	if (kvm_request_pending(vcpu)) {
7581 7582
		if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
			kvm_x86_ops->get_vmcs12_pages(vcpu);
7583
		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
7584
			kvm_mmu_unload(vcpu);
7585
		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
M
Marcelo Tosatti 已提交
7586
			__kvm_migrate_timers(vcpu);
7587 7588
		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
			kvm_gen_update_masterclock(vcpu->kvm);
7589 7590
		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
			kvm_gen_kvmclock_update(vcpu);
Z
Zachary Amsden 已提交
7591 7592
		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
			r = kvm_guest_time_update(vcpu);
7593 7594 7595
			if (unlikely(r))
				goto out;
		}
7596
		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
7597
			kvm_mmu_sync_roots(vcpu);
7598 7599
		if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu))
			kvm_mmu_load_cr3(vcpu);
7600
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
7601
			kvm_vcpu_flush_tlb(vcpu, true);
7602
		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
A
Avi Kivity 已提交
7603
			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
A
Avi Kivity 已提交
7604 7605 7606
			r = 0;
			goto out;
		}
7607
		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
A
Avi Kivity 已提交
7608
			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
7609
			vcpu->mmio_needed = 0;
J
Joerg Roedel 已提交
7610 7611 7612
			r = 0;
			goto out;
		}
7613 7614 7615 7616 7617 7618
		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
			/* Page is swapped out. Do synthetic halt */
			vcpu->arch.apf.halted = true;
			r = 1;
			goto out;
		}
G
Glauber Costa 已提交
7619 7620
		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
			record_steal_time(vcpu);
P
Paolo Bonzini 已提交
7621 7622
		if (kvm_check_request(KVM_REQ_SMI, vcpu))
			process_smi(vcpu);
A
Avi Kivity 已提交
7623 7624
		if (kvm_check_request(KVM_REQ_NMI, vcpu))
			process_nmi(vcpu);
7625
		if (kvm_check_request(KVM_REQ_PMU, vcpu))
7626
			kvm_pmu_handle_event(vcpu);
7627
		if (kvm_check_request(KVM_REQ_PMI, vcpu))
7628
			kvm_pmu_deliver_pmi(vcpu);
7629 7630 7631
		if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
			BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
			if (test_bit(vcpu->arch.pending_ioapic_eoi,
7632
				     vcpu->arch.ioapic_handled_vectors)) {
7633 7634 7635 7636 7637 7638 7639
				vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
				vcpu->run->eoi.vector =
						vcpu->arch.pending_ioapic_eoi;
				r = 0;
				goto out;
			}
		}
7640 7641
		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
			vcpu_scan_ioapic(vcpu);
7642 7643
		if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
			vcpu_load_eoi_exitmap(vcpu);
7644 7645
		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
			kvm_vcpu_reload_apic_access_page(vcpu);
7646 7647 7648 7649 7650 7651
		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
			r = 0;
			goto out;
		}
7652 7653 7654 7655 7656 7657
		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
			r = 0;
			goto out;
		}
A
Andrey Smetanin 已提交
7658 7659 7660 7661 7662 7663
		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
			vcpu->run->hyperv = vcpu->arch.hyperv.exit;
			r = 0;
			goto out;
		}
7664 7665 7666 7667 7668 7669

		/*
		 * KVM_REQ_HV_STIMER has to be processed after
		 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
		 * depend on the guest clock being up-to-date
		 */
A
Andrey Smetanin 已提交
7670 7671
		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
			kvm_hv_process_stimers(vcpu);
7672
	}
A
Avi Kivity 已提交
7673

A
Avi Kivity 已提交
7674
	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
7675
		++vcpu->stat.req_event;
7676 7677 7678 7679 7680 7681
		kvm_apic_accept_events(vcpu);
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			r = 1;
			goto out;
		}

7682 7683
		if (inject_pending_event(vcpu, req_int_win) != 0)
			req_immediate_exit = true;
7684
		else {
7685
			/* Enable SMI/NMI/IRQ window open exits if needed.
7686
			 *
7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697
			 * SMIs have three cases:
			 * 1) They can be nested, and then there is nothing to
			 *    do here because RSM will cause a vmexit anyway.
			 * 2) There is an ISA-specific reason why SMI cannot be
			 *    injected, and the moment when this changes can be
			 *    intercepted.
			 * 3) Or the SMI can be pending because
			 *    inject_pending_event has completed the injection
			 *    of an IRQ or NMI from the previous vmexit, and
			 *    then we request an immediate exit to inject the
			 *    SMI.
7698 7699
			 */
			if (vcpu->arch.smi_pending && !is_smm(vcpu))
7700 7701
				if (!kvm_x86_ops->enable_smi_window(vcpu))
					req_immediate_exit = true;
7702 7703 7704 7705
			if (vcpu->arch.nmi_pending)
				kvm_x86_ops->enable_nmi_window(vcpu);
			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
				kvm_x86_ops->enable_irq_window(vcpu);
7706
			WARN_ON(vcpu->arch.exception.pending);
7707
		}
A
Avi Kivity 已提交
7708 7709 7710 7711 7712 7713 7714

		if (kvm_lapic_enabled(vcpu)) {
			update_cr8_intercept(vcpu);
			kvm_lapic_sync_to_vapic(vcpu);
		}
	}

7715 7716
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r)) {
7717
		goto cancel_injection;
7718 7719
	}

7720 7721 7722
	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
7723 7724 7725 7726 7727 7728 7729

	/*
	 * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
	 * IPI are then delayed after guest entry, which ensures that they
	 * result in virtual interrupt delivery.
	 */
	local_irq_disable();
7730 7731
	vcpu->mode = IN_GUEST_MODE;

7732 7733
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

7734
	/*
7735
	 * 1) We should set ->mode before checking ->requests.  Please see
7736
	 * the comment in kvm_vcpu_exiting_guest_mode().
7737 7738 7739 7740 7741 7742 7743 7744
	 *
	 * 2) For APICv, we should set ->mode before checking PIR.ON.  This
	 * pairs with the memory barrier implicit in pi_test_and_set_on
	 * (see vmx_deliver_posted_interrupt).
	 *
	 * 3) This also orders the write to mode from any reads to the page
	 * tables done while the VCPU is running.  Please see the comment
	 * in kvm_flush_remote_tlbs.
7745
	 */
7746
	smp_mb__after_srcu_read_unlock();
7747

7748 7749 7750 7751
	/*
	 * This handles the case where a posted interrupt was
	 * notified with kvm_vcpu_kick.
	 */
7752 7753
	if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
		kvm_x86_ops->sync_pir_to_irr(vcpu);
7754

R
Radim Krčmář 已提交
7755
	if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
A
Avi Kivity 已提交
7756
	    || need_resched() || signal_pending(current)) {
7757
		vcpu->mode = OUTSIDE_GUEST_MODE;
A
Avi Kivity 已提交
7758
		smp_wmb();
7759 7760
		local_irq_enable();
		preempt_enable();
7761
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
7762
		r = 1;
7763
		goto cancel_injection;
7764 7765
	}

7766 7767
	kvm_load_guest_xcr0(vcpu);

7768 7769
	if (req_immediate_exit) {
		kvm_make_request(KVM_REQ_EVENT, vcpu);
7770
		kvm_x86_ops->request_immediate_exit(vcpu);
7771
	}
7772

7773
	trace_kvm_entry(vcpu->vcpu_id);
7774 7775
	if (lapic_timer_advance_ns)
		wait_lapic_expire(vcpu);
7776
	guest_enter_irqoff();
7777

7778 7779 7780 7781 7782 7783
	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
7784
		set_debugreg(vcpu->arch.dr6, 6);
7785
		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
7786
	}
7787

A
Avi Kivity 已提交
7788
	kvm_x86_ops->run(vcpu);
7789

7790 7791 7792 7793 7794 7795 7796 7797 7798
	/*
	 * Do this here before restoring debug registers on the host.  And
	 * since we do this before handling the vmexit, a DR access vmexit
	 * can (a) read the correct value of the debug registers, (b) set
	 * KVM_DEBUGREG_WONT_EXIT again.
	 */
	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
		kvm_x86_ops->sync_dirty_debug_regs(vcpu);
7799 7800 7801 7802
		kvm_update_dr0123(vcpu);
		kvm_update_dr6(vcpu);
		kvm_update_dr7(vcpu);
		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
7803 7804
	}

7805 7806 7807 7808 7809 7810 7811
	/*
	 * If the guest has used debug registers, at least dr7
	 * will be disabled while returning to the host.
	 * If we don't have active breakpoints in the host, we don't
	 * care about the messed up debug address registers. But if
	 * we have some of them active, restore the old state.
	 */
7812
	if (hw_breakpoint_active())
7813
		hw_breakpoint_restore();
7814

7815
	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
7816

7817
	vcpu->mode = OUTSIDE_GUEST_MODE;
A
Avi Kivity 已提交
7818
	smp_wmb();
7819

7820 7821
	kvm_put_guest_xcr0(vcpu);

7822
	kvm_before_interrupt(vcpu);
7823
	kvm_x86_ops->handle_external_intr(vcpu);
7824
	kvm_after_interrupt(vcpu);
7825 7826 7827

	++vcpu->stat.exits;

P
Paolo Bonzini 已提交
7828
	guest_exit_irqoff();
7829

P
Paolo Bonzini 已提交
7830
	local_irq_enable();
7831 7832
	preempt_enable();

7833
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
7834

7835 7836 7837 7838
	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
7839 7840
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
7841 7842
	}

7843 7844
	if (unlikely(vcpu->arch.tsc_always_catchup))
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
7845

7846 7847
	if (vcpu->arch.apic_attention)
		kvm_lapic_sync_from_vapic(vcpu);
A
Avi Kivity 已提交
7848

7849
	vcpu->arch.gpa_available = false;
A
Avi Kivity 已提交
7850
	r = kvm_x86_ops->handle_exit(vcpu);
7851 7852 7853 7854
	return r;

cancel_injection:
	kvm_x86_ops->cancel_injection(vcpu);
7855 7856
	if (unlikely(vcpu->arch.apic_attention))
		kvm_lapic_sync_from_vapic(vcpu);
7857 7858 7859
out:
	return r;
}
7860

7861 7862
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
7863 7864
	if (!kvm_arch_vcpu_runnable(vcpu) &&
	    (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
7865 7866 7867
		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
		kvm_vcpu_block(vcpu);
		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
7868 7869 7870 7871

		if (kvm_x86_ops->post_block)
			kvm_x86_ops->post_block(vcpu);

7872 7873 7874
		if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
			return 1;
	}
7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892

	kvm_apic_accept_events(vcpu);
	switch(vcpu->arch.mp_state) {
	case KVM_MP_STATE_HALTED:
		vcpu->arch.pv.pv_unhalted = false;
		vcpu->arch.mp_state =
			KVM_MP_STATE_RUNNABLE;
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.apf.halted = false;
		break;
	case KVM_MP_STATE_INIT_RECEIVED:
		break;
	default:
		return -EINTR;
		break;
	}
	return 1;
}
7893

7894 7895
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
7896 7897 7898
	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
		kvm_x86_ops->check_nested_events(vcpu, false);

7899 7900 7901 7902
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		!vcpu->arch.apf.halted);
}

7903
static int vcpu_run(struct kvm_vcpu *vcpu)
7904 7905
{
	int r;
7906
	struct kvm *kvm = vcpu->kvm;
7907

7908
	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
P
Paolo Bonzini 已提交
7909
	vcpu->arch.l1tf_flush_l1d = true;
7910

7911
	for (;;) {
7912
		if (kvm_vcpu_running(vcpu)) {
A
Avi Kivity 已提交
7913
			r = vcpu_enter_guest(vcpu);
7914
		} else {
7915
			r = vcpu_block(kvm, vcpu);
7916 7917
		}

7918 7919 7920
		if (r <= 0)
			break;

7921
		kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
7922 7923 7924
		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

7925 7926
		if (dm_request_for_irq_injection(vcpu) &&
			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
7927 7928
			r = 0;
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
7929
			++vcpu->stat.request_irq_exits;
7930
			break;
7931
		}
7932 7933 7934

		kvm_check_async_pf_completion(vcpu);

7935 7936
		if (signal_pending(current)) {
			r = -EINTR;
A
Avi Kivity 已提交
7937
			vcpu->run->exit_reason = KVM_EXIT_INTR;
7938
			++vcpu->stat.signal_exits;
7939
			break;
7940 7941
		}
		if (need_resched()) {
7942
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
7943
			cond_resched();
7944
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
7945
		}
7946 7947
	}

7948
	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
7949 7950 7951 7952

	return r;
}

7953 7954 7955 7956
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
	int r;
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
7957
	r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (r != EMULATE_DONE)
		return 0;
	return 1;
}

static int complete_emulated_pio(struct kvm_vcpu *vcpu)
{
	BUG_ON(!vcpu->arch.pio.count);

	return complete_emulated_io(vcpu);
}

A
Avi Kivity 已提交
7971 7972 7973 7974 7975
/*
 * Implements the following, as a state machine:
 *
 * read:
 *   for each fragment
7976 7977 7978 7979
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       exit
 *       copy data
A
Avi Kivity 已提交
7980 7981 7982 7983
 *   execute insn
 *
 * write:
 *   for each fragment
7984 7985 7986 7987
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       copy data
 *       exit
A
Avi Kivity 已提交
7988
 */
7989
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
7990 7991
{
	struct kvm_run *run = vcpu->run;
A
Avi Kivity 已提交
7992
	struct kvm_mmio_fragment *frag;
7993
	unsigned len;
7994

7995
	BUG_ON(!vcpu->mmio_needed);
7996

7997
	/* Complete previous fragment */
7998 7999
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
8000
	if (!vcpu->mmio_is_write)
8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

8014
	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
8015
		vcpu->mmio_needed = 0;
8016 8017

		/* FIXME: return into emulator if single-stepping.  */
A
Avi Kivity 已提交
8018
		if (vcpu->mmio_is_write)
8019 8020 8021 8022
			return 1;
		vcpu->mmio_read_completed = 1;
		return complete_emulated_io(vcpu);
	}
8023

8024 8025 8026
	run->exit_reason = KVM_EXIT_MMIO;
	run->mmio.phys_addr = frag->gpa;
	if (vcpu->mmio_is_write)
8027 8028
		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
	run->mmio.len = min(8u, frag->len);
8029 8030 8031
	run->mmio.is_write = vcpu->mmio_is_write;
	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	return 0;
8032 8033
}

8034 8035 8036 8037
/* Swap (qemu) user FPU context for the guest FPU context. */
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
8038
	copy_fpregs_to_fpstate(&current->thread.fpu);
8039 8040 8041 8042 8043 8044 8045 8046 8047 8048 8049 8050
	/* PKRU is separately restored in kvm_x86_ops->run.  */
	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
				~XFEATURE_MASK_PKRU);
	preempt_enable();
	trace_kvm_fpu(1);
}

/* When vcpu_run ends, restore user space FPU context. */
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
8051
	copy_kernel_to_fpregs(&current->thread.fpu.state);
8052 8053 8054 8055 8056
	preempt_enable();
	++vcpu->stat.fpu_reload;
	trace_kvm_fpu(0);
}

8057 8058 8059 8060
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

8061
	vcpu_load(vcpu);
8062
	kvm_sigset_activate(vcpu);
8063 8064
	kvm_load_guest_fpu(vcpu);

8065
	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
8066 8067 8068 8069
		if (kvm_run->immediate_exit) {
			r = -EINTR;
			goto out;
		}
8070
		kvm_vcpu_block(vcpu);
8071
		kvm_apic_accept_events(vcpu);
8072
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
8073
		r = -EAGAIN;
8074 8075 8076 8077 8078
		if (signal_pending(current)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
		}
8079
		goto out;
8080 8081
	}

K
Ken Hofsass 已提交
8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 8092
	if (vcpu->run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
		r = -EINVAL;
		goto out;
	}

	if (vcpu->run->kvm_dirty_regs) {
		r = sync_regs(vcpu);
		if (r != 0)
			goto out;
	}

8093
	/* re-sync apic's tpr */
8094
	if (!lapic_in_kernel(vcpu)) {
A
Andre Przywara 已提交
8095 8096 8097 8098 8099
		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
			r = -EINVAL;
			goto out;
		}
	}
8100

8101 8102 8103 8104 8105
	if (unlikely(vcpu->arch.complete_userspace_io)) {
		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
		vcpu->arch.complete_userspace_io = NULL;
		r = cui(vcpu);
		if (r <= 0)
8106
			goto out;
8107 8108
	} else
		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
8109

8110 8111 8112 8113
	if (kvm_run->immediate_exit)
		r = -EINTR;
	else
		r = vcpu_run(vcpu);
8114 8115

out:
8116
	kvm_put_guest_fpu(vcpu);
K
Ken Hofsass 已提交
8117 8118
	if (vcpu->run->kvm_valid_regs)
		store_regs(vcpu);
8119
	post_kvm_run_save(vcpu);
8120
	kvm_sigset_deactivate(vcpu);
8121

8122
	vcpu_put(vcpu);
8123 8124 8125
	return r;
}

K
Ken Hofsass 已提交
8126
static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
8127
{
8128 8129 8130 8131
	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
		/*
		 * We are here if userspace calls get_regs() in the middle of
		 * instruction emulation. Registers state needs to be copied
G
Guo Chao 已提交
8132
		 * back from emulation context to vcpu. Userspace shouldn't do
8133 8134 8135
		 * that usually, but some bad designed PV devices (vmware
		 * backdoor interface) need this to work
		 */
8136
		emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
8137 8138
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
	}
8139 8140 8141 8142 8143 8144 8145 8146
	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
8147
#ifdef CONFIG_X86_64
8148 8149 8150 8151 8152 8153 8154 8155
	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
8156 8157
#endif

8158
	regs->rip = kvm_rip_read(vcpu);
8159
	regs->rflags = kvm_get_rflags(vcpu);
K
Ken Hofsass 已提交
8160
}
8161

K
Ken Hofsass 已提交
8162 8163 8164 8165
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	__get_regs(vcpu, regs);
8166
	vcpu_put(vcpu);
8167 8168 8169
	return 0;
}

K
Ken Hofsass 已提交
8170
static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
8171
{
8172 8173 8174
	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;

8175 8176 8177 8178 8179 8180 8181 8182
	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
8183
#ifdef CONFIG_X86_64
8184 8185 8186 8187 8188 8189 8190 8191
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
8192 8193
#endif

8194
	kvm_rip_write(vcpu, regs->rip);
8195
	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
8196

8197 8198
	vcpu->arch.exception.pending = false;

8199
	kvm_make_request(KVM_REQ_EVENT, vcpu);
K
Ken Hofsass 已提交
8200
}
8201

K
Ken Hofsass 已提交
8202 8203 8204 8205
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	__set_regs(vcpu, regs);
8206
	vcpu_put(vcpu);
8207 8208 8209 8210 8211 8212 8213
	return 0;
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

8214
	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
8215 8216 8217 8218 8219
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

K
Ken Hofsass 已提交
8220
static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
8221
{
8222
	struct desc_ptr dt;
8223

8224 8225 8226 8227 8228 8229
	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
8230

8231 8232
	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
8233 8234

	kvm_x86_ops->get_idt(vcpu, &dt);
8235 8236
	sregs->idt.limit = dt.size;
	sregs->idt.base = dt.address;
8237
	kvm_x86_ops->get_gdt(vcpu, &dt);
8238 8239
	sregs->gdt.limit = dt.size;
	sregs->gdt.base = dt.address;
8240

8241
	sregs->cr0 = kvm_read_cr0(vcpu);
8242
	sregs->cr2 = vcpu->arch.cr2;
8243
	sregs->cr3 = kvm_read_cr3(vcpu);
8244
	sregs->cr4 = kvm_read_cr4(vcpu);
8245
	sregs->cr8 = kvm_get_cr8(vcpu);
8246
	sregs->efer = vcpu->arch.efer;
8247 8248
	sregs->apic_base = kvm_get_apic_base(vcpu);

8249
	memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap));
8250

8251
	if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
8252 8253
		set_bit(vcpu->arch.interrupt.nr,
			(unsigned long *)sregs->interrupt_bitmap);
K
Ken Hofsass 已提交
8254
}
8255

K
Ken Hofsass 已提交
8256 8257 8258 8259 8260
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	__get_sregs(vcpu, sregs);
8261
	vcpu_put(vcpu);
8262 8263 8264
	return 0;
}

8265 8266 8267
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
8268 8269
	vcpu_load(vcpu);

8270
	kvm_apic_accept_events(vcpu);
8271 8272 8273 8274 8275 8276
	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
					vcpu->arch.pv.pv_unhalted)
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
	else
		mp_state->mp_state = vcpu->arch.mp_state;

8277
	vcpu_put(vcpu);
8278 8279 8280 8281 8282 8283
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
8284 8285 8286 8287
	int ret = -EINVAL;

	vcpu_load(vcpu);

8288
	if (!lapic_in_kernel(vcpu) &&
8289
	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
8290
		goto out;
8291

8292 8293 8294 8295
	/* INITs are latched while in SMM */
	if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
	    (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
	     mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
8296
		goto out;
8297

8298 8299 8300 8301 8302
	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
	} else
		vcpu->arch.mp_state = mp_state->mp_state;
8303
	kvm_make_request(KVM_REQ_EVENT, vcpu);
8304 8305 8306 8307 8308

	ret = 0;
out:
	vcpu_put(vcpu);
	return ret;
8309 8310
}

8311 8312
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code)
8313
{
8314
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
8315
	int ret;
8316

8317
	init_emulate_ctxt(vcpu);
8318

8319
	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
8320
				   has_error_code, error_code);
8321 8322

	if (ret)
8323
		return EMULATE_FAIL;
8324

8325 8326
	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);
8327
	kvm_make_request(KVM_REQ_EVENT, vcpu);
8328
	return EMULATE_DONE;
8329 8330 8331
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

P
Peng Hao 已提交
8332
static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
8333
{
8334 8335 8336 8337
	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
			(sregs->cr4 & X86_CR4_OSXSAVE))
		return  -EINVAL;

8338
	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
8339 8340 8341 8342 8343
		/*
		 * When EFER.LME and CR0.PG are set, the processor is in
		 * 64-bit mode (though maybe in a 32-bit code segment).
		 * CR4.PAE and EFER.LMA must be set.
		 */
8344
		if (!(sregs->cr4 & X86_CR4_PAE)
8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358
		    || !(sregs->efer & EFER_LMA))
			return -EINVAL;
	} else {
		/*
		 * Not in 64-bit mode: EFER.LMA is clear and the code
		 * segment cannot be 64-bit.
		 */
		if (sregs->efer & EFER_LMA || sregs->cs.l)
			return -EINVAL;
	}

	return 0;
}

K
Ken Hofsass 已提交
8359
static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
8360
{
8361
	struct msr_data apic_base_msr;
8362
	int mmu_reset_needed = 0;
8363
	int cpuid_update_needed = 0;
8364
	int pending_vec, max_bits, idx;
8365
	struct desc_ptr dt;
8366 8367
	int ret = -EINVAL;

8368
	if (kvm_valid_sregs(vcpu, sregs))
8369
		goto out;
8370

8371 8372 8373
	apic_base_msr.data = sregs->apic_base;
	apic_base_msr.host_initiated = true;
	if (kvm_set_apic_base(vcpu, &apic_base_msr))
8374
		goto out;
8375

8376 8377
	dt.size = sregs->idt.limit;
	dt.address = sregs->idt.base;
8378
	kvm_x86_ops->set_idt(vcpu, &dt);
8379 8380
	dt.size = sregs->gdt.limit;
	dt.address = sregs->gdt.base;
8381 8382
	kvm_x86_ops->set_gdt(vcpu, &dt);

8383
	vcpu->arch.cr2 = sregs->cr2;
8384
	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
8385
	vcpu->arch.cr3 = sregs->cr3;
8386
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
8387

8388
	kvm_set_cr8(vcpu, sregs->cr8);
8389

8390
	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
8391 8392
	kvm_x86_ops->set_efer(vcpu, sregs->efer);

8393
	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
8394
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
8395
	vcpu->arch.cr0 = sregs->cr0;
8396

8397
	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
8398 8399
	cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
				(X86_CR4_OSXSAVE | X86_CR4_PKE));
8400
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
8401
	if (cpuid_update_needed)
A
Avi Kivity 已提交
8402
		kvm_update_cpuid(vcpu);
8403 8404

	idx = srcu_read_lock(&vcpu->kvm->srcu);
8405
	if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) {
8406
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
8407 8408
		mmu_reset_needed = 1;
	}
8409
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
8410 8411 8412 8413

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

8414
	max_bits = KVM_NR_INTERRUPTS;
G
Gleb Natapov 已提交
8415 8416 8417
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
8418
		kvm_queue_interrupt(vcpu, pending_vec, false);
G
Gleb Natapov 已提交
8419
		pr_debug("Set back pending irq %d\n", pending_vec);
8420 8421
	}

8422 8423 8424 8425 8426 8427
	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
8428

8429 8430
	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
8431

8432 8433
	update_cr8_intercept(vcpu);

M
Marcelo Tosatti 已提交
8434
	/* Older userspace won't unhalt the vcpu on reset. */
8435
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
M
Marcelo Tosatti 已提交
8436
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
8437
	    !is_protmode(vcpu))
M
Marcelo Tosatti 已提交
8438 8439
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

8440 8441
	kvm_make_request(KVM_REQ_EVENT, vcpu);

8442 8443
	ret = 0;
out:
K
Ken Hofsass 已提交
8444 8445 8446 8447 8448 8449 8450 8451 8452 8453
	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = __set_sregs(vcpu, sregs);
8454 8455
	vcpu_put(vcpu);
	return ret;
8456 8457
}

J
Jan Kiszka 已提交
8458 8459
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
8460
{
8461
	unsigned long rflags;
8462
	int i, r;
8463

8464 8465
	vcpu_load(vcpu);

8466 8467 8468
	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (vcpu->arch.exception.pending)
8469
			goto out;
8470 8471 8472 8473 8474 8475
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

8476 8477 8478 8479 8480
	/*
	 * Read rflags as long as potentially injected trace flags are still
	 * filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);
8481 8482 8483 8484 8485 8486

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
8487 8488
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
8489
		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
8490 8491 8492 8493
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
8494
	kvm_update_dr7(vcpu);
8495

J
Jan Kiszka 已提交
8496 8497 8498
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
			get_segment_base(vcpu, VCPU_SREG_CS);
8499

8500 8501 8502 8503 8504
	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);
8505

8506
	kvm_x86_ops->update_bp_intercept(vcpu);
8507

8508
	r = 0;
J
Jan Kiszka 已提交
8509

8510
out:
8511
	vcpu_put(vcpu);
8512 8513 8514
	return r;
}

8515 8516 8517 8518 8519 8520 8521 8522
/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
8523
	int idx;
8524

8525 8526
	vcpu_load(vcpu);

8527
	idx = srcu_read_lock(&vcpu->kvm->srcu);
8528
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
8529
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
8530 8531 8532 8533 8534
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;

8535
	vcpu_put(vcpu);
8536 8537 8538
	return 0;
}

8539 8540
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
8541
	struct fxregs_state *fxsave;
8542

8543
	vcpu_load(vcpu);
8544

8545
	fxsave = &vcpu->arch.guest_fpu.state.fxsave;
8546 8547 8548 8549 8550 8551 8552
	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
8553
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));
8554

8555
	vcpu_put(vcpu);
8556 8557 8558 8559 8560
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
8561 8562 8563 8564 8565
	struct fxregs_state *fxsave;

	vcpu_load(vcpu);

	fxsave = &vcpu->arch.guest_fpu.state.fxsave;
8566 8567 8568 8569 8570 8571 8572 8573

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
8574
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));
8575

8576
	vcpu_put(vcpu);
8577 8578 8579
	return 0;
}

K
Ken Hofsass 已提交
8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618
static void store_regs(struct kvm_vcpu *vcpu)
{
	BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
		__get_regs(vcpu, &vcpu->run->s.regs.regs);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
		__get_sregs(vcpu, &vcpu->run->s.regs.sregs);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
		kvm_vcpu_ioctl_x86_get_vcpu_events(
				vcpu, &vcpu->run->s.regs.events);
}

static int sync_regs(struct kvm_vcpu *vcpu)
{
	if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)
		return -EINVAL;

	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
		__set_regs(vcpu, &vcpu->run->s.regs.regs);
		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
	}
	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
		if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs))
			return -EINVAL;
		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
	}
	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
		if (kvm_vcpu_ioctl_x86_set_vcpu_events(
				vcpu, &vcpu->run->s.regs.events))
			return -EINVAL;
		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
	}

	return 0;
}

I
Ingo Molnar 已提交
8619
static void fx_init(struct kvm_vcpu *vcpu)
8620
{
8621
	fpstate_init(&vcpu->arch.guest_fpu.state);
8622
	if (boot_cpu_has(X86_FEATURE_XSAVES))
8623
		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
8624
			host_xcr0 | XSTATE_COMPACTION_ENABLED;
8625

8626 8627 8628
	/*
	 * Ensure guest xcr0 is valid for loading
	 */
D
Dave Hansen 已提交
8629
	vcpu->arch.xcr0 = XFEATURE_MASK_FP;
8630

8631
	vcpu->arch.cr0 |= X86_CR0_ET;
8632 8633
}

8634 8635
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
8636 8637
	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;

8638
	kvmclock_reset(vcpu);
8639

8640
	kvm_x86_ops->vcpu_free(vcpu);
8641
	free_cpumask_var(wbinvd_dirty_mask);
8642 8643 8644 8645 8646
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
						unsigned int id)
{
8647 8648
	struct kvm_vcpu *vcpu;

8649
	if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
Z
Zachary Amsden 已提交
8650 8651 8652
		printk_once(KERN_WARNING
		"kvm: SMP vm created on host with unstable TSC; "
		"guest TSC will not be reliable\n");
8653 8654 8655 8656

	vcpu = kvm_x86_ops->vcpu_create(kvm, id);

	return vcpu;
8657
}
8658

8659 8660
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
8661
	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
X
Xiao Guangrong 已提交
8662
	kvm_vcpu_mtrr_init(vcpu);
8663
	vcpu_load(vcpu);
8664
	kvm_vcpu_reset(vcpu, false);
8665
	kvm_init_mmu(vcpu, false);
8666
	vcpu_put(vcpu);
8667
	return 0;
8668 8669
}

8670
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
8671
{
8672
	struct msr_data msr;
8673
	struct kvm *kvm = vcpu->kvm;
8674

8675 8676
	kvm_hv_vcpu_postcreate(vcpu);

8677
	if (mutex_lock_killable(&vcpu->mutex))
8678
		return;
8679
	vcpu_load(vcpu);
8680 8681 8682 8683
	msr.data = 0x0;
	msr.index = MSR_IA32_TSC;
	msr.host_initiated = true;
	kvm_write_tsc(vcpu, &msr);
8684
	vcpu_put(vcpu);
8685
	mutex_unlock(&vcpu->mutex);
8686

8687 8688 8689
	if (!kvmclock_periodic_sync)
		return;

8690 8691
	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
					KVMCLOCK_SYNC_PERIOD);
8692 8693
}

8694
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
8695
{
8696 8697
	vcpu->arch.apf.msr_val = 0;

8698
	vcpu_load(vcpu);
8699 8700 8701 8702 8703 8704
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

8705
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
8706
{
8707 8708
	kvm_lapic_reset(vcpu, init_event);

8709 8710
	vcpu->arch.hflags = 0;

8711
	vcpu->arch.smi_pending = 0;
8712
	vcpu->arch.smi_count = 0;
A
Avi Kivity 已提交
8713 8714
	atomic_set(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = 0;
8715
	vcpu->arch.nmi_injected = false;
8716 8717
	kvm_clear_interrupt_queue(vcpu);
	kvm_clear_exception_queue(vcpu);
8718
	vcpu->arch.exception.pending = false;
8719

8720
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
8721
	kvm_update_dr0123(vcpu);
8722
	vcpu->arch.dr6 = DR6_INIT;
J
Jan Kiszka 已提交
8723
	kvm_update_dr6(vcpu);
8724
	vcpu->arch.dr7 = DR7_FIXED_1;
8725
	kvm_update_dr7(vcpu);
8726

N
Nadav Amit 已提交
8727 8728
	vcpu->arch.cr2 = 0;

8729
	kvm_make_request(KVM_REQ_EVENT, vcpu);
8730
	vcpu->arch.apf.msr_val = 0;
G
Glauber Costa 已提交
8731
	vcpu->arch.st.msr_val = 0;
8732

8733 8734
	kvmclock_reset(vcpu);

8735 8736 8737
	kvm_clear_async_pf_completion_queue(vcpu);
	kvm_async_pf_hash_reset(vcpu);
	vcpu->arch.apf.halted = false;
8738

8739 8740 8741 8742 8743 8744 8745
	if (kvm_mpx_supported()) {
		void *mpx_state_buffer;

		/*
		 * To avoid have the INIT path from kvm_apic_has_events() that be
		 * called with loaded FPU and does not let userspace fix the state.
		 */
8746 8747
		if (init_event)
			kvm_put_guest_fpu(vcpu);
8748 8749 8750 8751 8752 8753 8754 8755
		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
					XFEATURE_MASK_BNDREGS);
		if (mpx_state_buffer)
			memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state));
		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
					XFEATURE_MASK_BNDCSR);
		if (mpx_state_buffer)
			memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
8756 8757
		if (init_event)
			kvm_load_guest_fpu(vcpu);
8758 8759
	}

P
Paolo Bonzini 已提交
8760
	if (!init_event) {
8761
		kvm_pmu_reset(vcpu);
P
Paolo Bonzini 已提交
8762
		vcpu->arch.smbase = 0x30000;
K
Kyle Huey 已提交
8763 8764

		vcpu->arch.msr_misc_features_enables = 0;
8765 8766

		vcpu->arch.xcr0 = XFEATURE_MASK_FP;
P
Paolo Bonzini 已提交
8767
	}
8768

8769 8770 8771 8772
	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

8773 8774
	vcpu->arch.ia32_xss = 0;

8775
	kvm_x86_ops->vcpu_reset(vcpu, init_event);
8776 8777
}

8778
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
8779 8780 8781 8782 8783 8784 8785 8786
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs.selector = vector << 8;
	cs.base = vector << 12;
	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_rip_write(vcpu, 0);
8787 8788
}

8789
int kvm_arch_hardware_enable(void)
8790
{
8791 8792 8793
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
8794 8795 8796 8797
	int ret;
	u64 local_tsc;
	u64 max_tsc = 0;
	bool stable, backwards_tsc = false;
A
Avi Kivity 已提交
8798 8799

	kvm_shared_msr_cpu_online();
8800
	ret = kvm_x86_ops->hardware_enable();
8801 8802 8803
	if (ret != 0)
		return ret;

8804
	local_tsc = rdtsc();
8805
	stable = !kvm_check_tsc_unstable();
8806 8807 8808
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!stable && vcpu->cpu == smp_processor_id())
8809
				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825
			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
				backwards_tsc = true;
				if (vcpu->arch.last_host_tsc > max_tsc)
					max_tsc = vcpu->arch.last_host_tsc;
			}
		}
	}

	/*
	 * Sometimes, even reliable TSCs go backwards.  This happens on
	 * platforms that reset TSC during suspend or hibernate actions, but
	 * maintain synchronization.  We must compensate.  Fortunately, we can
	 * detect that condition here, which happens early in CPU bringup,
	 * before any KVM threads can be running.  Unfortunately, we can't
	 * bring the TSCs fully up to date with real time, as we aren't yet far
	 * enough into CPU bringup that we know how much real time has actually
8826
	 * elapsed; our helper function, ktime_get_boot_ns() will be using boot
8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850
	 * variables that haven't been updated yet.
	 *
	 * So we simply find the maximum observed TSC above, then record the
	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
	 * the adjustment will be applied.  Note that we accumulate
	 * adjustments, in case multiple suspend cycles happen before some VCPU
	 * gets a chance to run again.  In the event that no KVM threads get a
	 * chance to run, we will miss the entire elapsed period, as we'll have
	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
	 * loose cycle time.  This isn't too big a deal, since the loss will be
	 * uniform across all VCPUs (not to mention the scenario is extremely
	 * unlikely). It is possible that a second hibernate recovery happens
	 * much faster than a first, causing the observed TSC here to be
	 * smaller; this would require additional padding adjustment, which is
	 * why we set last_host_tsc to the local tsc observed here.
	 *
	 * N.B. - this code below runs only on platforms with reliable TSC,
	 * as that is the only way backwards_tsc is set above.  Also note
	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
	 * have the same delta_cyc adjustment applied if backwards_tsc
	 * is detected.  Note further, this adjustment is only done once,
	 * as we reset last_host_tsc on all VCPUs to stop this from being
	 * called multiple times (one for each physical CPU bringup).
	 *
G
Guo Chao 已提交
8851
	 * Platforms with unreliable TSCs don't have to deal with this, they
8852 8853 8854 8855 8856 8857 8858
	 * will be compensated by the logic in vcpu_load, which sets the TSC to
	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
	 * guarantee that they stay in perfect synchronization.
	 */
	if (backwards_tsc) {
		u64 delta_cyc = max_tsc - local_tsc;
		list_for_each_entry(kvm, &vm_list, vm_list) {
8859
			kvm->arch.backwards_tsc_observed = true;
8860 8861 8862
			kvm_for_each_vcpu(i, vcpu, kvm) {
				vcpu->arch.tsc_offset_adjustment += delta_cyc;
				vcpu->arch.last_host_tsc = local_tsc;
8863
				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877
			}

			/*
			 * We have to disable TSC offset matching.. if you were
			 * booting a VM while issuing an S4 host suspend....
			 * you may have some problem.  Solving this issue is
			 * left as an exercise to the reader.
			 */
			kvm->arch.last_tsc_nsec = 0;
			kvm->arch.last_tsc_write = 0;
		}

	}
	return 0;
8878 8879
}

8880
void kvm_arch_hardware_disable(void)
8881
{
8882 8883
	kvm_x86_ops->hardware_disable();
	drop_user_return_notifiers();
8884 8885 8886 8887
}

int kvm_arch_hardware_setup(void)
{
8888 8889 8890 8891 8892 8893
	int r;

	r = kvm_x86_ops->hardware_setup();
	if (r != 0)
		return r;

8894 8895 8896 8897
	if (kvm_has_tsc_control) {
		/*
		 * Make sure the user can only configure tsc_khz values that
		 * fit into a signed integer.
8898
		 * A min value is not calculated because it will always
8899 8900 8901 8902 8903 8904
		 * be 1 on all machines.
		 */
		u64 max = min(0x7fffffffULL,
			      __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
		kvm_max_guest_tsc_khz = max;

8905
		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
8906
	}
8907

8908 8909
	kvm_init_msr_list();
	return 0;
8910 8911 8912 8913 8914 8915 8916 8917 8918 8919
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930
}

bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);

bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
8931 8932
}

8933
struct static_key kvm_no_apic_vcpu __read_mostly;
8934
EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
8935

8936 8937 8938 8939 8940
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int r;

8941
	vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
8942
	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
8943
	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
8944
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
8945
	else
8946
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
8947 8948 8949 8950 8951 8952

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
8953
	vcpu->arch.pio_data = page_address(page);
8954

8955
	kvm_set_tsc_khz(vcpu, max_tsc_khz);
Z
Zachary Amsden 已提交
8956

8957 8958 8959 8960
	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

8961
	if (irqchip_in_kernel(vcpu->kvm)) {
8962 8963 8964
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
8965 8966
	} else
		static_key_slow_inc(&kvm_no_apic_vcpu);
8967

H
Huang Ying 已提交
8968 8969 8970 8971
	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
8972
		goto fail_free_lapic;
H
Huang Ying 已提交
8973 8974 8975
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

8976 8977
	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
		r = -ENOMEM;
8978
		goto fail_free_mce_banks;
8979
	}
8980

I
Ingo Molnar 已提交
8981
	fx_init(vcpu);
8982

8983
	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
8984

8985 8986
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

8987 8988
	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;

8989
	kvm_async_pf_hash_reset(vcpu);
8990
	kvm_pmu_init(vcpu);
8991

8992
	vcpu->arch.pending_external_vector = -1;
8993
	vcpu->arch.preempted_in_kernel = false;
8994

8995 8996
	kvm_hv_vcpu_init(vcpu);

8997
	return 0;
I
Ingo Molnar 已提交
8998

8999 9000
fail_free_mce_banks:
	kfree(vcpu->arch.mce_banks);
9001 9002
fail_free_lapic:
	kvm_free_lapic(vcpu);
9003 9004 9005
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
9006
	free_page((unsigned long)vcpu->arch.pio_data);
9007 9008 9009 9010 9011 9012
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
9013 9014
	int idx;

A
Andrey Smetanin 已提交
9015
	kvm_hv_vcpu_uninit(vcpu);
9016
	kvm_pmu_destroy(vcpu);
9017
	kfree(vcpu->arch.mce_banks);
9018
	kvm_free_lapic(vcpu);
9019
	idx = srcu_read_lock(&vcpu->kvm->srcu);
9020
	kvm_mmu_destroy(vcpu);
9021
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
9022
	free_page((unsigned long)vcpu->arch.pio_data);
9023
	if (!lapic_in_kernel(vcpu))
9024
		static_key_slow_dec(&kvm_no_apic_vcpu);
9025
}
9026

R
Radim Krčmář 已提交
9027 9028
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
P
Paolo Bonzini 已提交
9029
	vcpu->arch.l1tf_flush_l1d = true;
9030
	kvm_x86_ops->sched_in(vcpu, cpu);
R
Radim Krčmář 已提交
9031 9032
}

9033
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
9034
{
9035 9036 9037
	if (type)
		return -EINVAL;

9038
	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
9039
	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
9040
	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
B
Ben-Ami Yassour 已提交
9041
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
9042
	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
9043

9044 9045
	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
9046 9047 9048
	/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
	set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		&kvm->arch.irq_sources_bitmap);
9049

9050
	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
9051
	mutex_init(&kvm->arch.apic_map_lock);
9052 9053
	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);

9054
	kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
9055
	pvclock_update_vm_gtod_copy(kvm);
9056

9057 9058
	kvm->arch.guest_can_read_msr_platform_info = true;

9059
	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
9060
	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
9061

9062
	kvm_hv_init_vm(kvm);
9063
	kvm_page_track_init(kvm);
9064
	kvm_mmu_init_vm(kvm);
9065

9066 9067 9068
	if (kvm_x86_ops->vm_init)
		return kvm_x86_ops->vm_init(kvm);

9069
	return 0;
9070 9071 9072 9073
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
9074
	vcpu_load(vcpu);
9075 9076 9077 9078 9079 9080 9081
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
9082
	struct kvm_vcpu *vcpu;
9083 9084 9085 9086

	/*
	 * Unpin any mmu pages first.
	 */
9087 9088
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_clear_async_pf_completion_queue(vcpu);
9089
		kvm_unload_vcpu_mmu(vcpu);
9090
	}
9091 9092 9093 9094 9095 9096
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;
9097

9098 9099
	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
9100 9101
}

9102 9103
void kvm_arch_sync_events(struct kvm *kvm)
{
9104
	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
9105
	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
9106
	kvm_free_pit(kvm);
9107 9108
}

9109
int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
9110 9111
{
	int i, r;
9112
	unsigned long hva;
9113 9114
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *slot, old;
9115 9116

	/* Called with kvm->slots_lock held.  */
9117 9118
	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
		return -EINVAL;
9119

9120 9121
	slot = id_to_memslot(slots, id);
	if (size) {
9122
		if (slot->npages)
9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140
			return -EEXIST;

		/*
		 * MAP_SHARED to prevent internal slot pages from being moved
		 * by fork()/COW.
		 */
		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
			      MAP_SHARED | MAP_ANONYMOUS, 0);
		if (IS_ERR((void *)hva))
			return PTR_ERR((void *)hva);
	} else {
		if (!slot->npages)
			return 0;

		hva = 0;
	}

	old = *slot;
9141
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
9142
		struct kvm_userspace_memory_region m;
9143

9144 9145 9146
		m.slot = id | (i << 16);
		m.flags = 0;
		m.guest_phys_addr = gpa;
9147
		m.userspace_addr = hva;
9148
		m.memory_size = size;
9149 9150 9151 9152 9153
		r = __kvm_set_memory_region(kvm, &m);
		if (r < 0)
			return r;
	}

9154 9155
	if (!size)
		vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
9156

9157 9158 9159 9160
	return 0;
}
EXPORT_SYMBOL_GPL(__x86_set_memory_region);

9161
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
9162 9163 9164 9165
{
	int r;

	mutex_lock(&kvm->slots_lock);
9166
	r = __x86_set_memory_region(kvm, id, gpa, size);
9167 9168 9169 9170 9171 9172
	mutex_unlock(&kvm->slots_lock);

	return r;
}
EXPORT_SYMBOL_GPL(x86_set_memory_region);

9173 9174
void kvm_arch_destroy_vm(struct kvm *kvm)
{
9175 9176 9177 9178 9179 9180
	if (current->mm == kvm->mm) {
		/*
		 * Free memory regions allocated on behalf of userspace,
		 * unless the the memory map has changed due to process exit
		 * or fd copying.
		 */
9181 9182 9183
		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
9184
	}
9185 9186
	if (kvm_x86_ops->vm_destroy)
		kvm_x86_ops->vm_destroy(kvm);
9187 9188
	kvm_pic_destroy(kvm);
	kvm_ioapic_destroy(kvm);
9189
	kvm_free_vcpus(kvm);
9190
	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
9191
	kvm_mmu_uninit_vm(kvm);
9192
	kvm_page_track_cleanup(kvm);
9193
	kvm_hv_destroy_vm(kvm);
9194
}
9195

9196
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
9197 9198 9199 9200
			   struct kvm_memory_slot *dont)
{
	int i;

9201 9202
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
T
Thomas Huth 已提交
9203
			kvfree(free->arch.rmap[i]);
9204
			free->arch.rmap[i] = NULL;
9205
		}
9206 9207 9208 9209 9210
		if (i == 0)
			continue;

		if (!dont || free->arch.lpage_info[i - 1] !=
			     dont->arch.lpage_info[i - 1]) {
T
Thomas Huth 已提交
9211
			kvfree(free->arch.lpage_info[i - 1]);
9212
			free->arch.lpage_info[i - 1] = NULL;
9213 9214
		}
	}
9215 9216

	kvm_page_track_free_memslot(free, dont);
9217 9218
}

9219 9220
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
9221 9222 9223
{
	int i;

9224
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
9225
		struct kvm_lpage_info *linfo;
9226 9227
		unsigned long ugfn;
		int lpages;
9228
		int level = i + 1;
9229 9230 9231 9232

		lpages = gfn_to_index(slot->base_gfn + npages - 1,
				      slot->base_gfn, level) + 1;

9233
		slot->arch.rmap[i] =
K
Kees Cook 已提交
9234 9235
			kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
				 GFP_KERNEL);
9236
		if (!slot->arch.rmap[i])
9237
			goto out_free;
9238 9239
		if (i == 0)
			continue;
9240

K
Kees Cook 已提交
9241
		linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL);
9242
		if (!linfo)
9243 9244
			goto out_free;

9245 9246
		slot->arch.lpage_info[i - 1] = linfo;

9247
		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
9248
			linfo[0].disallow_lpage = 1;
9249
		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
9250
			linfo[lpages - 1].disallow_lpage = 1;
9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261
		ugfn = slot->userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !kvm_largepages_enabled()) {
			unsigned long j;

			for (j = 0; j < lpages; ++j)
9262
				linfo[j].disallow_lpage = 1;
9263 9264 9265
		}
	}

9266 9267 9268
	if (kvm_page_track_create_memslot(slot, npages))
		goto out_free;

9269 9270 9271
	return 0;

out_free:
9272
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
T
Thomas Huth 已提交
9273
		kvfree(slot->arch.rmap[i]);
9274 9275 9276 9277
		slot->arch.rmap[i] = NULL;
		if (i == 0)
			continue;

T
Thomas Huth 已提交
9278
		kvfree(slot->arch.lpage_info[i - 1]);
9279
		slot->arch.lpage_info[i - 1] = NULL;
9280 9281 9282 9283
	}
	return -ENOMEM;
}

9284
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
9285
{
9286 9287 9288 9289
	/*
	 * memslots->generation has been incremented.
	 * mmio generation may have reached its maximum value.
	 */
9290
	kvm_mmu_invalidate_mmio_sptes(kvm, slots);
9291 9292
}

9293 9294
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
9295
				const struct kvm_userspace_memory_region *mem,
9296
				enum kvm_mr_change change)
9297
{
9298 9299 9300
	return 0;
}

9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350
static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
				     struct kvm_memory_slot *new)
{
	/* Still write protect RO slot */
	if (new->flags & KVM_MEM_READONLY) {
		kvm_mmu_slot_remove_write_access(kvm, new);
		return;
	}

	/*
	 * Call kvm_x86_ops dirty logging hooks when they are valid.
	 *
	 * kvm_x86_ops->slot_disable_log_dirty is called when:
	 *
	 *  - KVM_MR_CREATE with dirty logging is disabled
	 *  - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
	 *
	 * The reason is, in case of PML, we need to set D-bit for any slots
	 * with dirty logging disabled in order to eliminate unnecessary GPA
	 * logging in PML buffer (and potential PML buffer full VMEXT). This
	 * guarantees leaving PML enabled during guest's lifetime won't have
	 * any additonal overhead from PML when guest is running with dirty
	 * logging disabled for memory slots.
	 *
	 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
	 * to dirty logging mode.
	 *
	 * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
	 *
	 * In case of write protect:
	 *
	 * Write protect all pages for dirty logging.
	 *
	 * All the sptes including the large sptes which point to this
	 * slot are set to readonly. We can not create any new large
	 * spte on this slot until the end of the logging.
	 *
	 * See the comments in fast_page_fault().
	 */
	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
		if (kvm_x86_ops->slot_enable_log_dirty)
			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
		else
			kvm_mmu_slot_remove_write_access(kvm, new);
	} else {
		if (kvm_x86_ops->slot_disable_log_dirty)
			kvm_x86_ops->slot_disable_log_dirty(kvm, new);
	}
}

9351
void kvm_arch_commit_memory_region(struct kvm *kvm,
9352
				const struct kvm_userspace_memory_region *mem,
9353
				const struct kvm_memory_slot *old,
9354
				const struct kvm_memory_slot *new,
9355
				enum kvm_mr_change change)
9356
{
9357
	int nr_mmu_pages = 0;
9358

9359 9360 9361 9362
	if (!kvm->arch.n_requested_mmu_pages)
		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);

	if (nr_mmu_pages)
9363
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
9364

9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381
	/*
	 * Dirty logging tracks sptes in 4k granularity, meaning that large
	 * sptes have to be split.  If live migration is successful, the guest
	 * in the source machine will be destroyed and large sptes will be
	 * created in the destination. However, if the guest continues to run
	 * in the source machine (for example if live migration fails), small
	 * sptes will remain around and cause bad performance.
	 *
	 * Scan sptes if dirty logging has been stopped, dropping those
	 * which can be collapsed into a single large-page spte.  Later
	 * page faults will create the large-page sptes.
	 */
	if ((change != KVM_MR_DELETE) &&
		(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
		!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
		kvm_mmu_zap_collapsible_sptes(kvm, new);

9382
	/*
9383
	 * Set up write protection and/or dirty logging for the new slot.
9384
	 *
9385 9386 9387 9388
	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have
	 * been zapped so no dirty logging staff is needed for old slot. For
	 * KVM_MR_FLAGS_ONLY, the old slot is essentially the same one as the
	 * new and it's also covered when dealing with the new slot.
9389 9390
	 *
	 * FIXME: const-ify all uses of struct kvm_memory_slot.
9391
	 */
9392
	if (change != KVM_MR_DELETE)
9393
		kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
9394
}
9395

9396
void kvm_arch_flush_shadow_all(struct kvm *kvm)
9397
{
9398
	kvm_mmu_invalidate_zap_all_pages(kvm);
9399 9400
}

9401 9402 9403
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
9404
	kvm_page_track_flush_slot(kvm, slot);
9405 9406
}

9407 9408 9409 9410 9411 9412 9413
static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	return (is_guest_mode(vcpu) &&
			kvm_x86_ops->guest_apic_has_interrupt &&
			kvm_x86_ops->guest_apic_has_interrupt(vcpu));
}

9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
	if (!list_empty_careful(&vcpu->async_pf.done))
		return true;

	if (kvm_apic_has_events(vcpu))
		return true;

	if (vcpu->arch.pv.pv_unhalted)
		return true;

9425 9426 9427
	if (vcpu->arch.exception.pending)
		return true;

9428 9429 9430
	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
	    (vcpu->arch.nmi_pending &&
	     kvm_x86_ops->nmi_allowed(vcpu)))
9431 9432
		return true;

9433 9434
	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
	    (vcpu->arch.smi_pending && !is_smm(vcpu)))
P
Paolo Bonzini 已提交
9435 9436
		return true;

9437
	if (kvm_arch_interrupt_allowed(vcpu) &&
9438 9439
	    (kvm_cpu_has_interrupt(vcpu) ||
	    kvm_guest_apic_has_interrupt(vcpu)))
9440 9441
		return true;

A
Andrey Smetanin 已提交
9442 9443 9444
	if (kvm_hv_has_stimer_pending(vcpu))
		return true;

9445 9446 9447
	return false;
}

9448 9449
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
9450
	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
9451
}
9452

9453 9454
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
9455
	return vcpu->arch.preempted_in_kernel;
9456 9457
}

9458
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
9459
{
9460
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
9461
}
9462 9463 9464 9465 9466

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}
9467

9468
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
J
Jan Kiszka 已提交
9469
{
9470 9471 9472 9473 9474 9475
	if (is_64_bit_mode(vcpu))
		return kvm_rip_read(vcpu);
	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
		     kvm_rip_read(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
J
Jan Kiszka 已提交
9476

9477 9478 9479
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	return kvm_get_linear_rip(vcpu) == linear_rip;
J
Jan Kiszka 已提交
9480 9481 9482
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

9483 9484 9485 9486 9487 9488
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
9489
		rflags &= ~X86_EFLAGS_TF;
9490 9491 9492 9493
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

9494
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
9495 9496
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
J
Jan Kiszka 已提交
9497
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
9498
		rflags |= X86_EFLAGS_TF;
9499
	kvm_x86_ops->set_rflags(vcpu, rflags);
9500 9501 9502 9503 9504
}

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__kvm_set_rflags(vcpu, rflags);
9505
	kvm_make_request(KVM_REQ_EVENT, vcpu);
9506 9507 9508
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

G
Gleb Natapov 已提交
9509 9510 9511 9512
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	int r;

9513
	if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
9514
	      work->wakeup_all)
G
Gleb Natapov 已提交
9515 9516 9517 9518 9519 9520
		return;

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		return;

9521 9522
	if (!vcpu->arch.mmu->direct_map &&
	      work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
X
Xiao Guangrong 已提交
9523 9524
		return;

9525
	vcpu->arch.mmu->page_fault(vcpu, work->gva, 0, true);
G
Gleb Natapov 已提交
9526 9527
}

9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}

static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
}

static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}

static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
9554 9555
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588
		key = kvm_async_pf_next_probe(key);

	return key;
}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}

static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;
			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or  |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}

9589 9590
static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{
9591 9592 9593

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
				      sizeof(val));
9594 9595
}

9596 9597 9598 9599 9600 9601 9602
static int apf_get_user(struct kvm_vcpu *vcpu, u32 *val)
{

	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val,
				      sizeof(u32));
}

9603 9604 9605
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
9606 9607
	struct x86_exception fault;

9608
	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
9609
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
9610 9611

	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
9612 9613
	    (vcpu->arch.apf.send_user_only &&
	     kvm_x86_ops->get_cpl(vcpu) == 0))
9614 9615
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
9616 9617 9618 9619 9620
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
9621
		fault.async_page_fault = true;
9622
		kvm_inject_page_fault(vcpu, &fault);
9623
	}
9624 9625 9626 9627 9628
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
9629
	struct x86_exception fault;
9630
	u32 val;
9631

9632
	if (work->wakeup_all)
9633 9634 9635
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
9636
	trace_kvm_async_pf_ready(work->arch.token, work->gva);
9637

9638 9639 9640 9641 9642 9643 9644 9645 9646 9647 9648
	if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
	    !apf_get_user(vcpu, &val)) {
		if (val == KVM_PV_REASON_PAGE_NOT_PRESENT &&
		    vcpu->arch.exception.pending &&
		    vcpu->arch.exception.nr == PF_VECTOR &&
		    !apf_put_user(vcpu, 0)) {
			vcpu->arch.exception.injected = false;
			vcpu->arch.exception.pending = false;
			vcpu->arch.exception.nr = 0;
			vcpu->arch.exception.has_error_code = false;
			vcpu->arch.exception.error_code = 0;
9649 9650
			vcpu->arch.exception.has_payload = false;
			vcpu->arch.exception.payload = 0;
9651 9652 9653 9654 9655 9656 9657 9658 9659
		} else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
			fault.vector = PF_VECTOR;
			fault.error_code_valid = true;
			fault.error_code = 0;
			fault.nested_page_fault = false;
			fault.address = work->arch.token;
			fault.async_page_fault = true;
			kvm_inject_page_fault(vcpu, &fault);
		}
9660
	}
9661
	vcpu->arch.apf.halted = false;
9662
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
9663 9664 9665 9666 9667 9668 9669
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return true;
	else
9670
		return kvm_can_do_async_pf(vcpu);
9671 9672
}

9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690
void kvm_arch_start_assignment(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);

void kvm_arch_end_assignment(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);

bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);

9691 9692 9693 9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708
void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

9709 9710 9711 9712 9713
bool kvm_arch_has_irq_bypass(void)
{
	return kvm_x86_ops->update_pi_irte != NULL;
}

F
Feng Wu 已提交
9714 9715 9716 9717 9718 9719
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

9720
	irqfd->producer = prod;
F
Feng Wu 已提交
9721

9722 9723
	return kvm_x86_ops->update_pi_irte(irqfd->kvm,
					   prod->irq, irqfd->gsi, 1);
F
Feng Wu 已提交
9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	int ret;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	WARN_ON(irqfd->producer != prod);
	irqfd->producer = NULL;

	/*
	 * When producer of consumer is unregistered, we change back to
	 * remapped mode, so we can re-use the current implementation
A
Andrea Gelmini 已提交
9739
	 * when the irq is masked/disabled or the consumer side (KVM
F
Feng Wu 已提交
9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756
	 * int this case doesn't want to receive the interrupts.
	*/
	ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
	if (ret)
		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
		       " fails: %d\n", irqfd->consumer.token, ret);
}

int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				   uint32_t guest_irq, bool set)
{
	if (!kvm_x86_ops->update_pi_irte)
		return -EINVAL;

	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
}

9757 9758 9759 9760 9761 9762
bool kvm_vector_hashing_enabled(void)
{
	return vector_hashing;
}
EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);

9763
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
J
Jason Wang 已提交
9764
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
9765 9766 9767 9768
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
9769
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
9770
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
9771
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
9772
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
9773
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
9774
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
9775
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
9776
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
9777
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
K
Kai Huang 已提交
9778
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
9779
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
9780 9781
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);