/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/mem_encrypt.h>

#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/internal.h> /* Ugh! */
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);

#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

/* EFER defaults:
 * - enable syscall per default because its emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
                                    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);

struct kvm_x86_ops *kvm_x86_ops __read_mostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

static bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);

unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

bool __read_mostly kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32  __read_mostly kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
u64  __read_mostly kvm_max_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
u64 __read_mostly kvm_default_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

/* lapic timer advance (tscdeadline mode only) in nanoseconds */
unsigned int __read_mostly lapic_timer_advance_ns = 0;
module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL_GPL(lapic_timer_advance_ns);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);

bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);

static bool __read_mostly force_emulation_prefix = false;
module_param(force_emulation_prefix, bool, S_IRUGO);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "req_event", VCPU_STAT(req_event) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ "max_mmu_page_hash_collisions",
		VM_STAT(max_mmu_page_hash_collisions) },
	{ NULL }
};

u64 __read_mostly host_xcr0;

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

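/*
 * Runs from the user-return notifier: restore the host value of every
 * shared MSR the guest run has modified, then unregister the notifier.
 */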
static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;
	unsigned long flags;

	/*
	 * Disabling irqs at this point since the following code could be
	 * interrupted and executed through kvm_arch_hardware_disable()
	 */
	local_irq_save(flags);
	if (locals->registered) {
		locals->registered = false;
		user_return_notifier_unregister(urn);
	}
	local_irq_restore(flags);
	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
}

static void shared_msr_update(unsigned slot, u32 msr)
{
	u64 value;
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	/* only read, and nobody should modify it at this time,
	 * so don't need lock */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	BUG_ON(slot >= KVM_NR_SHARED_MSRS);
	shared_msrs_global.msrs[slot] = msr;
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
	int err;

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return 0;
	smsr->values[slot].curr = value;
	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
	if (err)
		return 1;

	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
	return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
	enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff |
		(guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);

	if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
		return 1;
	if (!msr_info->host_initiated) {
		if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
			return 1;
		if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
			return 1;
	}

	kvm_lapic_set_base(vcpu, msr_info->data);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

asmlinkage __visible void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

#define EXCPT_FAULT		0
#define EXCPT_TRAP		1
#define EXCPT_ABORT		2
#define EXCPT_INTERRUPT		3

static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	/* #DB is trap, as instruction watchpoints are handled elsewhere */
	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	/* Reserved exceptions will result in fault */
	return EXCPT_FAULT;
}

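/*
 * Queue an exception for injection into the guest.  If another exception
 * is already pending, the benign/contributory/page-fault classes above
 * decide whether the two merge into a double fault (or into a triple-fault
 * shutdown when the pending exception is already #DF).
 */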
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
	queue:
		if (has_error && !is_protmode(vcpu))
			has_error = false;
		if (reinject) {
			/*
			 * On vmentry, vcpu->arch.exception.pending is only
			 * true if an event injection was blocked by
			 * nested_run_pending.  In that case, however,
			 * vcpu_enter_guest requests an immediate exit,
			 * and the guest shouldn't proceed far enough to
			 * need reinjection.
			 */
			WARN_ON_ONCE(vcpu->arch.exception.pending);
			vcpu->arch.exception.injected = true;
		} else {
			vcpu->arch.exception.pending = true;
			vcpu->arch.exception.injected = false;
		}
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		return;
	}

	/* to check exception */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/*
		 * Generate double fault per SDM Table 5-5.  Set
		 * exception.pending = true so that the double fault
		 * can trigger a nested vmexit.
		 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.injected = false;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace previous exception with a new one in a hope
		   that instruction re-execution will regenerate lost
		   exception */
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		return kvm_skip_emulated_instruction(vcpu);

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.exception.nested_apf =
		is_guest_mode(vcpu) && fault->async_page_fault;
	if (vcpu->arch.exception.nested_apf)
		vcpu->arch.apf.nested_apf_token = fault->address;
	else
		vcpu->arch.cr2 = fault->address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu, fault);

	return fault->nested_page_fault;
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

/*
 * This function will be used to read from the physical memory of the currently
 * running guest. The difference from kvm_vcpu_read_guest_page is that this function
 * can read from guest physical or from the guest's guest physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	struct x86_exception exception;
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa     = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			       void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] &
		     vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
	bool changed = true;
	int offset;
	gfn_t gfn;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
	offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
				       PFERR_USER_MASK | PFERR_WRITE_MASK);
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}
EXPORT_SYMBOL_GPL(pdptrs_changed);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
						 kvm_read_cr3(vcpu)))
			return 1;
	}

	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);

	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
			!vcpu->guest_xcr0_loaded) {
		/* kvm_set_xcr() also depends on this */
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
		vcpu->guest_xcr0_loaded = 1;
	}
}

static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}

static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	if (!(xcr0 & XFEATURE_MASK_FP))
		return 1;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see fx_init).
	 */
	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
		return 1;

	if (xcr0 & XFEATURE_MASK_AVX512) {
		if (!(xcr0 & XFEATURE_MASK_YMM))
			return 1;
		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
			return 1;
	}
	vcpu->arch.xcr0 = xcr0;

	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
		kvm_update_cpuid(vcpu);
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;

	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
		return 1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
		return 1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
		return 1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
		return 1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
		return 1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
			return 1;

		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	if (kvm_x86_ops->set_cr4(vcpu, cr4))
		return 1;

	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
		kvm_update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
#ifdef CONFIG_X86_64
	bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

	if (pcid_enabled)
		cr3 &= ~CR3_PCID_INVD;
#endif

	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		return 0;
	}

	if (is_long_mode(vcpu) &&
	    (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
		return 1;
	else if (is_pae(vcpu) && is_paging(vcpu) &&
		   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
		return 1;

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
	kvm_mmu_new_cr3(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (lapic_in_kernel(vcpu))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
	}
}

static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
}

static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	kvm_x86_ops->set_dr7(vcpu, dr7);
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}

static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
	u64 fixed = DR6_FIXED_1;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
		fixed |= DR6_RTM;
	return fixed;
}

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
		kvm_update_dr6(vcpu);
		break;
	case 5:
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	if (__kvm_set_dr(vcpu, dr, val)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		/* fall through */
	case 6:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			*val = vcpu->arch.dr6;
		else
			*val = kvm_x86_ops->get_dr6(vcpu);
		break;
	case 5:
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	u64 data;
	int err;

	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
	if (err)
		return err;
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
	return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
 * may depend on host virtualization features rather than host cpu features.
 */

static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
	MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
	HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
	HV_X64_MSR_RESET,
	HV_X64_MSR_VP_INDEX,
	HV_X64_MSR_VP_RUNTIME,
	HV_X64_MSR_SCONTROL,
	HV_X64_MSR_STIMER0_CONFIG,
	HV_X64_MSR_VP_ASSIST_PAGE,
	HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
	HV_X64_MSR_TSC_EMULATION_STATUS,

	MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,

	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSCDEADLINE,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
	MSR_IA32_MCG_EXT_CTL,
	MSR_IA32_SMBASE,
	MSR_SMI_COUNT,
	MSR_PLATFORM_INFO,
	MSR_MISC_FEATURES_ENABLES,
	MSR_AMD64_VIRT_SPEC_CTRL,
};

static unsigned num_emulated_msrs;

/*
 * List of msr numbers which are used to expose MSR-based features that
 * can be used by a hypervisor to validate requested CPU features.
 */
static u32 msr_based_features[] = {
	MSR_IA32_VMX_BASIC,
	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
	MSR_IA32_VMX_PINBASED_CTLS,
	MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
	MSR_IA32_VMX_PROCBASED_CTLS,
	MSR_IA32_VMX_TRUE_EXIT_CTLS,
	MSR_IA32_VMX_EXIT_CTLS,
	MSR_IA32_VMX_TRUE_ENTRY_CTLS,
	MSR_IA32_VMX_ENTRY_CTLS,
	MSR_IA32_VMX_MISC,
	MSR_IA32_VMX_CR0_FIXED0,
	MSR_IA32_VMX_CR0_FIXED1,
	MSR_IA32_VMX_CR4_FIXED0,
	MSR_IA32_VMX_CR4_FIXED1,
	MSR_IA32_VMX_VMCS_ENUM,
	MSR_IA32_VMX_PROCBASED_CTLS2,
	MSR_IA32_VMX_EPT_VPID_CAP,
	MSR_IA32_VMX_VMFUNC,

1098
	MSR_F10H_DECFG,
1099
	MSR_IA32_UCODE_REV,
1100
	MSR_IA32_ARCH_CAPABILITIES,
1101 1102 1103 1104
};

static unsigned int num_msr_based_features;

1105 1106 1107
static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
1108
	case MSR_IA32_UCODE_REV:
1109 1110
	case MSR_IA32_ARCH_CAPABILITIES:
		rdmsrl_safe(msr->index, &msr->data);
1111
		break;
1112 1113 1114 1115 1116 1117 1118
	default:
		if (kvm_x86_ops->get_msr_feature(msr))
			return 1;
	}
	return 0;
}

1119 1120 1121
static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct kvm_msr_entry msr;
1122
	int r;
1123 1124

	msr.index = index;
1125 1126 1127
	r = kvm_get_msr_feature(&msr);
	if (r)
		return r;
1128 1129 1130 1131 1132 1133

	*data = msr.data;

	return 0;
}

1134
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1135
{
1136
	if (efer & efer_reserved_bits)
1137
		return false;
1138

1139
	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
1140
			return false;
A
Alexander Graf 已提交
1141

1142
	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
1143
			return false;
1144

1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159
	return true;
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (!kvm_valid_efer(vcpu, efer))
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

1160
	efer &= ~EFER_LMA;
1161
	efer |= vcpu->arch.efer & EFER_LMA;
1162

1163 1164
	kvm_x86_ops->set_efer(vcpu, efer);

1165 1166 1167 1168
	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

1169
	return 0;
1170 1171
}

1172 1173 1174 1175 1176 1177
void kvm_enable_efer_bits(u64 mask)
{
       efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

1178 1179 1180 1181 1182
/*
 * Writes msr value into into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
1183
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1184
{
1185 1186 1187 1188 1189 1190
	switch (msr->index) {
	case MSR_FS_BASE:
	case MSR_GS_BASE:
	case MSR_KERNEL_GS_BASE:
	case MSR_CSTAR:
	case MSR_LSTAR:
1191
		if (is_noncanonical_address(msr->data, vcpu))
1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207
			return 1;
		break;
	case MSR_IA32_SYSENTER_EIP:
	case MSR_IA32_SYSENTER_ESP:
		/*
		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
		 * non-canonical address is written on Intel but not on
		 * AMD (which ignores the top 32-bits, because it does
		 * not implement 64-bit SYSENTER).
		 *
		 * 64-bit code should hence be able to write a non-canonical
		 * value on AMD.  Making the address canonical ensures that
		 * vmentry does not fail on Intel after writing a non-canonical
		 * value, and that something deterministic happens if the guest
		 * invokes 64-bit SYSENTER.
		 */
1208
		msr->data = get_canonical(msr->data, vcpu_virt_addr_bits(vcpu));
1209
	}
1210
	return kvm_x86_ops->set_msr(vcpu, msr);
1211
}
1212
EXPORT_SYMBOL_GPL(kvm_set_msr);
1213

1214 1215 1216
/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;
	int r;

	msr.index = index;
	msr.host_initiated = true;
	r = kvm_get_msr(vcpu, &msr);
	if (r)
		return r;

	*data = msr.data;
	return 0;
}

static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;

	msr.data = *data;
	msr.index = index;
	msr.host_initiated = true;
	return kvm_set_msr(vcpu, &msr);
}

#ifdef CONFIG_X86_64
struct pvclock_gtod_data {
	seqcount_t	seq;

	struct { /* extract of a clocksource struct */
		int vclock_mode;
		u64	cycle_last;
		u64	mask;
		u32	mult;
		u32	shift;
	} clock;

	u64		boot_ns;
	u64		nsec_base;
	u64		wall_time_sec;
};

static struct pvclock_gtod_data pvclock_gtod_data;

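/*
 * Mirror the timekeeper state KVM needs (clocksource parameters, boot
 * offset and wall time) into pvclock_gtod_data under its seqcount.
 */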
static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
	u64 boot_ns;

	boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));

	write_seqcount_begin(&vdata->seq);

	/* copy pvclock gtod data */
	vdata->clock.vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
	vdata->clock.mask		= tk->tkr_mono.mask;
	vdata->clock.mult		= tk->tkr_mono.mult;
	vdata->clock.shift		= tk->tkr_mono.shift;

	vdata->boot_ns			= boot_ns;
	vdata->nsec_base		= tk->tkr_mono.xtime_nsec;

	vdata->wall_time_sec            = tk->xtime_sec;

	write_seqcount_end(&vdata->seq);
}
#endif

void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
{
	/*
	 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
	 * vcpu_enter_guest.  This function is only called from
	 * the physical CPU that is running vcpu.
	 */
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec64 boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
		return;

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_guest_time_update below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime64(&boot);

	if (kvm->arch.kvmclock_offset) {
		struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
		boot = timespec64_sub(boot, ts);
	}
	wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	do_shl32_div32(dividend, divisor);
	return dividend;
}

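/*
 * kvm_get_time_scale() computes a (shift, multiplier) pair consumed by
 * pvclock_scale_delta() (see compute_guest_tsc() below), so that roughly
 *
 *	((delta << shift) * multiplier) >> 32  ==  delta * scaled_hz / base_hz
 *
 * where a negative shift means a right shift of delta.
 */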
static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
			       s8 *pshift, u32 *pmultiplier)
{
	uint64_t scaled64;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = base_hz;
	scaled64 = scaled_hz;
	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac(scaled64, tps32);

	pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n",
		 __func__, base_hz, scaled_hz, shift, *pmultiplier);
}

#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
static unsigned long max_tsc_khz;

static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
	u64 v = (u64)khz * (1000000 + ppm);
	do_div(v, 1000000);
	return v;
}

static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	u64 ratio;

	/* Guest TSC same frequency as host TSC? */
	if (!scale) {
		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
		return 0;
	}

	/* TSC scaling supported? */
	if (!kvm_has_tsc_control) {
		if (user_tsc_khz > tsc_khz) {
			vcpu->arch.tsc_catchup = 1;
			vcpu->arch.tsc_always_catchup = 1;
			return 0;
		} else {
			WARN(1, "user requested TSC rate below hardware speed\n");
			return -1;
		}
	}

	/* TSC scaling required  - calculate ratio */
	ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
				user_tsc_khz, tsc_khz);

	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
		WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
			  user_tsc_khz);
		return -1;
	}

	vcpu->arch.tsc_scaling_ratio = ratio;
	return 0;
}

static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{
	u32 thresh_lo, thresh_hi;
	int use_scaling = 0;

	/* tsc_khz can be zero if TSC calibration fails */
	if (user_tsc_khz == 0) {
		/* set tsc_scaling_ratio to a safe value */
		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
		return -1;
	}

	/* Compute a scale to convert nanoseconds in TSC cycles */
	kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
			   &vcpu->arch.virtual_tsc_shift,
			   &vcpu->arch.virtual_tsc_mult);
	vcpu->arch.virtual_tsc_khz = user_tsc_khz;

	/*
	 * Compute the variation in TSC rate which is acceptable
	 * within the range of tolerance and decide if the
	 * rate being applied is within that bounds of the hardware
	 * rate.  If so, no scaling or compensation need be done.
	 */
	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
	if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
		use_scaling = 1;
	}
	return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
}

static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
				      vcpu->arch.virtual_tsc_mult,
				      vcpu->arch.virtual_tsc_shift);
	tsc += vcpu->arch.this_tsc_write;
	return tsc;
}

static inline int gtod_is_based_on_tsc(int mode)
{
	return mode == VCLOCK_TSC || mode == VCLOCK_HVCLOCK;
}

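/*
 * Track whether all vCPUs see matching TSC values; when they do and the
 * host clocksource is TSC based, request that the masterclock be enabled.
 */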
static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	bool vcpus_matched;
	struct kvm_arch *ka = &vcpu->kvm->arch;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			 atomic_read(&vcpu->kvm->online_vcpus));

	/*
	 * Once the masterclock is enabled, always perform request in
	 * order to update it.
	 *
	 * In order to enable masterclock, the host clocksource must be TSC
	 * and the vcpus need to have matched TSCs.  When that happens,
	 * perform request to enable masterclock.
	 */
	if (ka->use_master_clock ||
	    (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
			    atomic_read(&vcpu->kvm->online_vcpus),
		            ka->use_master_clock, gtod->clock.vclock_mode);
#endif
}

static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
	u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}

/*
 * Multiply tsc by a fixed point number represented by ratio.
 *
 * The most significant 64-N bits (mult) of ratio represent the
 * integral part of the fixed point number; the remaining N bits
 * (frac) represent the fractional part, ie. ratio represents a fixed
 * point number (mult + frac * 2^(-N)).
 *
 * N equals to kvm_tsc_scaling_ratio_frac_bits.
 */
static inline u64 __scale_tsc(u64 ratio, u64 tsc)
{
	return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
}

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	u64 _tsc = tsc;
	u64 ratio = vcpu->arch.tsc_scaling_ratio;

	if (ratio != kvm_default_tsc_scaling_ratio)
		_tsc = __scale_tsc(ratio, tsc);

	return _tsc;
}
EXPORT_SYMBOL_GPL(kvm_scale_tsc);

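/*
 * Compute the TSC offset that makes the guest see target_tsc at this
 * instant: the target value minus the current (scaled) host TSC.
 */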
static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	u64 tsc;

	tsc = kvm_scale_tsc(vcpu, rdtsc());

	return target_tsc - tsc;
}

u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);

	return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);

static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
	vcpu->arch.tsc_offset = offset;
}

static inline bool kvm_check_tsc_unstable(void)
{
#ifdef CONFIG_X86_64
	/*
	 * TSC is marked unstable when we're running on Hyper-V,
	 * 'TSC page' clocksource is good.
	 */
	if (pvclock_gtod_data.clock.vclock_mode == VCLOCK_HVCLOCK)
		return false;
#endif
	return check_tsc_unstable();
}

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct kvm *kvm = vcpu->kvm;
	u64 offset, ns, elapsed;
	unsigned long flags;
	bool matched;
	bool already_matched;
	u64 data = msr->data;
	bool synchronizing = false;

	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	offset = kvm_compute_tsc_offset(vcpu, data);
	ns = ktime_get_boot_ns();
	elapsed = ns - kvm->arch.last_tsc_nsec;

	if (vcpu->arch.virtual_tsc_khz) {
		if (data == 0 && msr->host_initiated) {
			/*
			 * detection of vcpu initialization -- need to sync
			 * with other vCPUs. This particularly helps to keep
			 * kvm_clock stable after CPU hotplug
			 */
			synchronizing = true;
		} else {
			u64 tsc_exp = kvm->arch.last_tsc_write +
						nsec_to_cycles(vcpu, elapsed);
			u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
			/*
			 * Special case: TSC write with a small delta (1 second)
			 * of virtual cycle time against real time is
			 * interpreted as an attempt to synchronize the CPU.
			 */
			synchronizing = data < tsc_exp + tsc_hz &&
					data + tsc_hz > tsc_exp;
		}
	}

	/*
	 * For a reliable TSC, we can match TSC offsets, and for an unstable
	 * TSC, we add elapsed time in this computation.  We could let the
	 * compensation code attempt to catch up if we fall behind, but
	 * it's better to try to match offsets from the beginning.
         */
	if (synchronizing &&
	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
		if (!kvm_check_tsc_unstable()) {
			offset = kvm->arch.cur_tsc_offset;
			pr_debug("kvm: matched tsc offset for %llu\n", data);
		} else {
			u64 delta = nsec_to_cycles(vcpu, elapsed);
			data += delta;
			offset = kvm_compute_tsc_offset(vcpu, data);
			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
		}
		matched = true;
		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
	} else {
		/*
		 * We split periods of matched TSC writes into generations.
		 * For each generation, we track the original measured
		 * nanosecond time, offset, and write, so if TSCs are in
		 * sync, we can match exact offset, and if not, we can match
		 * exact software computation in compute_guest_tsc()
		 *
		 * These values are tracked in kvm->arch.cur_xxx variables.
		 */
		kvm->arch.cur_tsc_generation++;
		kvm->arch.cur_tsc_nsec = ns;
		kvm->arch.cur_tsc_write = data;
		kvm->arch.cur_tsc_offset = offset;
		matched = false;
		pr_debug("kvm: new tsc generation %llu, clock %llu\n",
			 kvm->arch.cur_tsc_generation, data);
	}

	/*
	 * We also track the most recent recorded KHZ, write and time to
	 * allow the matching interval to be extended at each write.
	 */
	kvm->arch.last_tsc_nsec = ns;
	kvm->arch.last_tsc_write = data;
	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;

	vcpu->arch.last_guest_tsc = data;

	/* Keep track of which generation this VCPU has synchronized to */
	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;

	if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST))
		update_ia32_tsc_adjust_msr(vcpu, offset);

	kvm_vcpu_write_tsc_offset(vcpu, offset);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
	if (!matched) {
		kvm->arch.nr_vcpus_matched_tsc = 0;
	} else if (!already_matched) {
		kvm->arch.nr_vcpus_matched_tsc++;
	}

	kvm_track_tsc_matching(vcpu);
	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}

EXPORT_SYMBOL_GPL(kvm_write_tsc);

static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
}
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
		WARN_ON(adjustment < 0);
	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
1691
	adjust_tsc_offset_guest(vcpu, adjustment);
1692 1693
}

1694 1695
#ifdef CONFIG_X86_64

1696
static u64 read_tsc(void)
1697
{
1698
	u64 ret = (u64)rdtsc_ordered();
1699
	u64 last = pvclock_gtod_data.clock.cycle_last;
1700 1701 1702 1703 1704 1705

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
1706
	 * predictable (it's just a function of time and the likely is
1707 1708 1709 1710 1711 1712 1713 1714 1715
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

1716
static inline u64 vgettsc(u64 *tsc_timestamp, int *mode)
1717 1718 1719
{
	long v;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744
	u64 tsc_pg_val;

	switch (gtod->clock.vclock_mode) {
	case VCLOCK_HVCLOCK:
		tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
						  tsc_timestamp);
		if (tsc_pg_val != U64_MAX) {
			/* TSC page valid */
			*mode = VCLOCK_HVCLOCK;
			v = (tsc_pg_val - gtod->clock.cycle_last) &
				gtod->clock.mask;
		} else {
			/* TSC page invalid */
			*mode = VCLOCK_NONE;
		}
		break;
	case VCLOCK_TSC:
		*mode = VCLOCK_TSC;
		*tsc_timestamp = read_tsc();
		v = (*tsc_timestamp - gtod->clock.cycle_last) &
			gtod->clock.mask;
		break;
	default:
		*mode = VCLOCK_NONE;
	}
1745

1746 1747
	if (*mode == VCLOCK_NONE)
		*tsc_timestamp = v = 0;
1748 1749 1750 1751

	return v * gtod->clock.mult;
}

1752
static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp)
1753
{
1754
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
1755 1756
	unsigned long seq;
	int mode;
1757
	u64 ns;
1758 1759 1760

	do {
		seq = read_seqcount_begin(&gtod->seq);
1761
		ns = gtod->nsec_base;
1762
		ns += vgettsc(tsc_timestamp, &mode);
1763
		ns >>= gtod->clock.shift;
1764
		ns += gtod->boot_ns;
1765
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
1766
	*t = ns;
1767 1768 1769 1770

	return mode;
}

1771
static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
1772 1773 1774 1775 1776 1777 1778 1779 1780 1781
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->nsec_base;
1782
		ns += vgettsc(tsc_timestamp, &mode);
1783 1784 1785 1786 1787 1788 1789 1790 1791
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

1792 1793
/* returns true if host is using TSC based clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
1794 1795
{
	/* checked again under seqlock below */
1796
	if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
1797 1798
		return false;

1799 1800
	return gtod_is_based_on_tsc(do_monotonic_boot(kernel_ns,
						      tsc_timestamp));
1801
}
1802

1803
/* returns true if host is using TSC based clocksource */
1804
static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
1805
					   u64 *tsc_timestamp)
1806 1807
{
	/* checked again under seqlock below */
1808
	if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
1809 1810
		return false;

1811
	return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
1812
}
1813 1814 1815 1816
#endif

/*
 *
1817 1818 1819
 * Assuming a stable TSC across physical CPUS, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 * 		VCPU0 on CPU0		|	VCPU1 on CPU1
 *
 * 1.  read timespec0,tsc0
 * 2.					| timespec1 = timespec0 + N
 * 					| tsc1 = tsc0 + M
 * 3. transition to guest		| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
 * 5.				        | ret1 = timespec1 + (rdtsc - tsc1)
 * 				        | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 * 	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller then the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
1852
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
1853 1854 1855 1856 1857 1858 1859 1860
 *
 */

static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	struct kvm_arch *ka = &kvm->arch;
	int vclock_mode;
1861 1862 1863 1864
	bool host_tsc_clocksource, vcpus_matched;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			atomic_read(&kvm->online_vcpus));
1865 1866 1867 1868 1869

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
1870
	host_tsc_clocksource = kvm_get_time_and_clockread(
1871 1872 1873
					&ka->master_kernel_ns,
					&ka->master_cycle_now);

1874
	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
1875
				&& !ka->backwards_tsc_observed
1876
				&& !ka->boot_vcpu_runs_old_kvmclock;
1877

1878 1879 1880 1881
	if (ka->use_master_clock)
		atomic_set(&kvm_guest_has_master_clock, 1);

	vclock_mode = pvclock_gtod_data.clock.vclock_mode;
1882 1883
	trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
					vcpus_matched);
1884 1885 1886
#endif
}

1887 1888 1889 1890 1891
void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}

1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904
static void kvm_gen_update_masterclock(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	int i;
	struct kvm_vcpu *vcpu;
	struct kvm_arch *ka = &kvm->arch;

	spin_lock(&ka->pvclock_gtod_sync_lock);
	kvm_make_mclock_inprogress_request(kvm);
	/* no guest entries from this point */
	pvclock_update_vm_gtod_copy(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
1905
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1906 1907 1908

	/* guest entries allowed */
	kvm_for_each_vcpu(i, vcpu, kvm)
1909
		kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
1910 1911 1912 1913 1914

	spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif
}

1915
u64 get_kvmclock_ns(struct kvm *kvm)
1916 1917
{
	struct kvm_arch *ka = &kvm->arch;
1918
	struct pvclock_vcpu_time_info hv_clock;
1919
	u64 ret;
1920

1921 1922 1923 1924
	spin_lock(&ka->pvclock_gtod_sync_lock);
	if (!ka->use_master_clock) {
		spin_unlock(&ka->pvclock_gtod_sync_lock);
		return ktime_get_boot_ns() + ka->kvmclock_offset;
1925 1926
	}

1927 1928 1929 1930
	hv_clock.tsc_timestamp = ka->master_cycle_now;
	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
	spin_unlock(&ka->pvclock_gtod_sync_lock);

1931 1932 1933
	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
	get_cpu();

1934 1935 1936 1937 1938 1939 1940
	if (__this_cpu_read(cpu_tsc_khz)) {
		kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
				   &hv_clock.tsc_shift,
				   &hv_clock.tsc_to_system_mul);
		ret = __pvclock_read_cycles(&hv_clock, rdtsc());
	} else
		ret = ktime_get_boot_ns() + ka->kvmclock_offset;
1941 1942 1943 1944

	put_cpu();

	return ret;
1945 1946
}

1947 1948 1949 1950 1951
static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct pvclock_vcpu_time_info guest_hv_clock;

1952
	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971
		&guest_hv_clock, sizeof(guest_hv_clock))))
		return;

	/* This VCPU is paused, but it's legal for a guest to read another
	 * VCPU's kvmclock, so we really have to follow the specification where
	 * it says that version is odd if data is being modified, and even after
	 * it is consistent.
	 *
	 * Version field updates must be kept separate.  This is because
	 * kvm_write_guest_cached might use a "rep movs" instruction, and
	 * writes within a string instruction are weakly ordered.  So there
	 * are three writes overall.
	 *
	 * As a small optimization, only write the version field in the first
	 * and third write.  The vcpu->pv_time cache is still valid, because the
	 * version field is the first in the struct.
	 */
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

1972 1973 1974
	if (guest_hv_clock.version & 1)
		++guest_hv_clock.version;  /* first time write, random junk */

1975
	vcpu->hv_clock.version = guest_hv_clock.version + 1;
1976 1977 1978
	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock.version));
1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991

	smp_wmb();

	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
	vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);

	if (vcpu->pvclock_set_guest_stopped_request) {
		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
		vcpu->pvclock_set_guest_stopped_request = false;
	}

	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);

1992 1993 1994
	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock));
1995 1996 1997 1998

	smp_wmb();

	vcpu->hv_clock.version++;
1999 2000 2001
	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock.version));
2002 2003
}

Z
Zachary Amsden 已提交
2004
static int kvm_guest_time_update(struct kvm_vcpu *v)
2005
{
2006
	unsigned long flags, tgt_tsc_khz;
2007
	struct kvm_vcpu_arch *vcpu = &v->arch;
2008
	struct kvm_arch *ka = &v->kvm->arch;
2009
	s64 kernel_ns;
2010
	u64 tsc_timestamp, host_tsc;
2011
	u8 pvclock_flags;
2012 2013 2014 2015
	bool use_master_clock;

	kernel_ns = 0;
	host_tsc = 0;
2016

2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027
	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
	spin_lock(&ka->pvclock_gtod_sync_lock);
	use_master_clock = ka->use_master_clock;
	if (use_master_clock) {
		host_tsc = ka->master_cycle_now;
		kernel_ns = ka->master_kernel_ns;
	}
	spin_unlock(&ka->pvclock_gtod_sync_lock);
2028 2029 2030

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
2031 2032
	tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
	if (unlikely(tgt_tsc_khz == 0)) {
2033 2034 2035 2036
		local_irq_restore(flags);
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
		return 1;
	}
2037
	if (!use_master_clock) {
2038
		host_tsc = rdtsc();
2039
		kernel_ns = ktime_get_boot_ns();
2040 2041
	}

2042
	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
2043

Z
Zachary Amsden 已提交
2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056
	/*
	 * We may have to catch up the TSC to match elapsed wall clock
	 * time for two reasons, even if kvmclock is used.
	 *   1) CPU could have been running below the maximum TSC rate
	 *   2) Broken TSC compensation resets the base at each VCPU
	 *      entry to avoid unknown leaps of TSC even when running
	 *      again on the same CPU.  This may cause apparent elapsed
	 *      time to disappear, and the guest to stand still or run
	 *	very slowly.
	 */
	if (vcpu->tsc_catchup) {
		u64 tsc = compute_guest_tsc(v, kernel_ns);
		if (tsc > tsc_timestamp) {
2057
			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
Z
Zachary Amsden 已提交
2058 2059
			tsc_timestamp = tsc;
		}
2060 2061
	}

2062 2063
	local_irq_restore(flags);

2064
	/* With all the info we got, fill in the values */
2065

2066 2067 2068 2069
	if (kvm_has_tsc_control)
		tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);

	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
2070
		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
2071 2072
				   &vcpu->hv_clock.tsc_shift,
				   &vcpu->hv_clock.tsc_to_system_mul);
2073
		vcpu->hw_tsc_khz = tgt_tsc_khz;
2074 2075
	}

2076
	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
2077
	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
Z
Zachary Amsden 已提交
2078
	vcpu->last_guest_tsc = tsc_timestamp;
2079

2080
	/* If the host uses TSC clocksource, then it is stable */
2081
	pvclock_flags = 0;
2082 2083 2084
	if (use_master_clock)
		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;

2085 2086
	vcpu->hv_clock.flags = pvclock_flags;

P
Paolo Bonzini 已提交
2087 2088 2089 2090
	if (vcpu->pv_time_enabled)
		kvm_setup_pvclock_page(v);
	if (v == kvm_get_vcpu(v->kvm, 0))
		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
2091
	return 0;
2092 2093
}

2094 2095 2096 2097 2098 2099 2100 2101
/*
 * kvmclock updates which are isolated to a given vcpu, such as
 * vcpu->cpu migration, should not allow system_timestamp from
 * the rest of the vcpus to remain static. Otherwise ntp frequency
 * correction applies to one vcpu's system_timestamp but not
 * the others.
 *
 * So in those cases, request a kvmclock update for all vcpus.
2102 2103 2104 2105
 * We need to rate-limit these requests though, as they can
 * considerably slow guests that have a large number of vcpus.
 * The time for a remote vcpu to update its kvmclock is bound
 * by the delay we use to rate-limit the updates.
2106 2107
 */

2108 2109 2110
#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)

static void kvmclock_update_fn(struct work_struct *work)
2111 2112
{
	int i;
2113 2114 2115 2116
	struct delayed_work *dwork = to_delayed_work(work);
	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
					   kvmclock_update_work);
	struct kvm *kvm = container_of(ka, struct kvm, arch);
2117 2118 2119
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
2120
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2121 2122 2123 2124
		kvm_vcpu_kick(vcpu);
	}
}

2125 2126 2127 2128
static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{
	struct kvm *kvm = v->kvm;

2129
	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
2130 2131 2132 2133
	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
					KVMCLOCK_UPDATE_DELAY);
}

2134 2135 2136 2137 2138 2139 2140 2141 2142
#define KVMCLOCK_SYNC_PERIOD (300 * HZ)

static void kvmclock_sync_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
					   kvmclock_sync_work);
	struct kvm *kvm = container_of(ka, struct kvm, arch);

2143 2144 2145
	if (!kvmclock_periodic_sync)
		return;

2146 2147 2148 2149 2150
	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
					KVMCLOCK_SYNC_PERIOD);
}

2151
static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2152
{
H
Huang Ying 已提交
2153 2154
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
2155 2156
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
H
Huang Ying 已提交
2157

2158 2159
	switch (msr) {
	case MSR_IA32_MCG_STATUS:
H
Huang Ying 已提交
2160
		vcpu->arch.mcg_status = data;
2161
		break;
2162
	case MSR_IA32_MCG_CTL:
2163 2164
		if (!(mcg_cap & MCG_CTL_P) &&
		    (data || !msr_info->host_initiated))
H
Huang Ying 已提交
2165 2166
			return 1;
		if (data != 0 && data != ~(u64)0)
2167
			return 1;
H
Huang Ying 已提交
2168 2169 2170 2171
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
2172
		    msr < MSR_IA32_MCx_CTL(bank_num)) {
H
Huang Ying 已提交
2173
			u32 offset = msr - MSR_IA32_MC0_CTL;
2174 2175 2176 2177 2178
			/* only 0 or all 1s can be written to IA32_MCi_CTL
			 * some Linux kernels though clear bit 10 in bank 4 to
			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
			 * this to avoid an uncatched #GP in the guest
			 */
H
Huang Ying 已提交
2179
			if ((offset & 0x3) == 0 &&
2180
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
H
Huang Ying 已提交
2181
				return -1;
2182 2183 2184
			if (!msr_info->host_initiated &&
				(offset & 0x3) == 1 && data != 0)
				return -1;
H
Huang Ying 已提交
2185 2186 2187 2188 2189 2190 2191 2192
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

E
Ed Swierk 已提交
2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209
static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
2210 2211 2212
	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
	if (IS_ERR(page)) {
		r = PTR_ERR(page);
E
Ed Swierk 已提交
2213
		goto out;
2214
	}
2215
	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
E
Ed Swierk 已提交
2216 2217 2218 2219 2220 2221 2222 2223
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

2224 2225 2226 2227
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
	gpa_t gpa = data & ~0x3f;

2228 2229
	/* Bits 3:5 are reserved, Should be zero */
	if (data & 0x38)
2230 2231 2232 2233 2234 2235 2236 2237 2238 2239
		return 1;

	vcpu->arch.apf.msr_val = data;

	if (!(data & KVM_ASYNC_PF_ENABLED)) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
		return 0;
	}

2240
	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
2241
					sizeof(u32)))
2242 2243
		return 1;

2244
	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
2245
	vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
2246 2247 2248 2249
	kvm_async_pf_wakeup_all(vcpu);
	return 0;
}

2250 2251
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
2252
	vcpu->arch.pv_time_enabled = false;
2253 2254
}

2255 2256 2257 2258 2259 2260
static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
}

G
Glauber Costa 已提交
2261 2262 2263 2264 2265
static void record_steal_time(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

2266
	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
G
Glauber Costa 已提交
2267 2268 2269
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
		return;

2270 2271 2272 2273 2274 2275
	/*
	 * Doing a TLB flush here, on the guest's behalf, can avoid
	 * expensive IPIs.
	 */
	if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
		kvm_vcpu_flush_tlb(vcpu, false);
2276

W
Wanpeng Li 已提交
2277 2278 2279 2280 2281
	if (vcpu->arch.st.steal.version & 1)
		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */

	vcpu->arch.st.steal.version += 1;

2282
	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
W
Wanpeng Li 已提交
2283 2284 2285 2286
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));

	smp_wmb();

2287 2288 2289
	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
		vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;
W
Wanpeng Li 已提交
2290

2291
	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
W
Wanpeng Li 已提交
2292 2293 2294 2295 2296
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));

	smp_wmb();

	vcpu->arch.st.steal.version += 1;
G
Glauber Costa 已提交
2297

2298
	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
G
Glauber Costa 已提交
2299 2300 2301
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}

2302
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2303
{
2304
	bool pr = false;
2305 2306
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
2307

2308
	switch (msr) {
2309 2310 2311 2312 2313
	case MSR_AMD64_NB_CFG:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
	case MSR_AMD64_BU_CFG2:
2314
	case MSR_AMD64_DC_CFG:
2315 2316
		break;

2317 2318 2319 2320
	case MSR_IA32_UCODE_REV:
		if (msr_info->host_initiated)
			vcpu->arch.microcode_version = data;
		break;
2321
	case MSR_EFER:
2322
		return set_efer(vcpu, data);
2323 2324
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
2325
		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
2326
		data &= ~(u64)0x8;	/* ignore TLB cache disable */
2327
		data &= ~(u64)0x40000;  /* ignore Mc status write enable */
2328
		if (data != 0) {
2329 2330
			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
				    data);
2331 2332
			return 1;
		}
2333
		break;
2334 2335
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
2336 2337
			vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
				    "0x%llx\n", data);
2338 2339
			return 1;
		}
2340
		break;
2341 2342 2343 2344 2345 2346 2347 2348 2349
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
2350 2351
		vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			    __func__, data);
2352
		break;
A
Avi Kivity 已提交
2353
	case 0x200 ... 0x2ff:
2354
		return kvm_mtrr_set_msr(vcpu, msr, data);
2355
	case MSR_IA32_APICBASE:
2356
		return kvm_set_apic_base(vcpu, msr_info);
G
Gleb Natapov 已提交
2357 2358
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
2359 2360 2361
	case MSR_IA32_TSCDEADLINE:
		kvm_set_lapic_tscdeadline_msr(vcpu, data);
		break;
W
Will Auld 已提交
2362
	case MSR_IA32_TSC_ADJUST:
2363
		if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
W
Will Auld 已提交
2364
			if (!msr_info->host_initiated) {
2365
				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
2366
				adjust_tsc_offset_guest(vcpu, adj);
W
Will Auld 已提交
2367 2368 2369 2370
			}
			vcpu->arch.ia32_tsc_adjust_msr = data;
		}
		break;
2371
	case MSR_IA32_MISC_ENABLE:
2372
		vcpu->arch.ia32_misc_enable_msr = data;
2373
		break;
P
Paolo Bonzini 已提交
2374 2375 2376 2377 2378
	case MSR_IA32_SMBASE:
		if (!msr_info->host_initiated)
			return 1;
		vcpu->arch.smbase = data;
		break;
2379 2380 2381
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, msr_info);
		break;
2382 2383 2384 2385 2386
	case MSR_SMI_COUNT:
		if (!msr_info->host_initiated)
			return 1;
		vcpu->arch.smi_count = data;
		break;
2387
	case MSR_KVM_WALL_CLOCK_NEW:
2388 2389 2390 2391
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
2392
	case MSR_KVM_SYSTEM_TIME_NEW:
2393
	case MSR_KVM_SYSTEM_TIME: {
2394 2395
		struct kvm_arch *ka = &vcpu->kvm->arch;

2396
		kvmclock_reset(vcpu);
2397

2398 2399 2400 2401
		if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
			bool tmp = (msr == MSR_KVM_SYSTEM_TIME);

			if (ka->boot_vcpu_runs_old_kvmclock != tmp)
2402
				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2403 2404 2405 2406

			ka->boot_vcpu_runs_old_kvmclock = tmp;
		}

2407
		vcpu->arch.time = data;
2408
		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2409 2410 2411 2412 2413

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

2414
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2415 2416
		     &vcpu->arch.pv_time, data & ~1ULL,
		     sizeof(struct pvclock_vcpu_time_info)))
2417 2418 2419
			vcpu->arch.pv_time_enabled = false;
		else
			vcpu->arch.pv_time_enabled = true;
2420

2421 2422
		break;
	}
2423 2424 2425 2426
	case MSR_KVM_ASYNC_PF_EN:
		if (kvm_pv_enable_async_pf(vcpu, data))
			return 1;
		break;
G
Glauber Costa 已提交
2427 2428 2429 2430 2431 2432 2433 2434
	case MSR_KVM_STEAL_TIME:

		if (unlikely(!sched_info_on()))
			return 1;

		if (data & KVM_STEAL_RESERVED_MASK)
			return 1;

2435
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
2436 2437
						data & KVM_STEAL_VALID_BITS,
						sizeof(struct kvm_steal_time)))
G
Glauber Costa 已提交
2438 2439 2440 2441 2442 2443 2444 2445 2446 2447
			return 1;

		vcpu->arch.st.msr_val = data;

		if (!(data & KVM_MSR_ENABLED))
			break;

		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

		break;
2448 2449 2450 2451
	case MSR_KVM_PV_EOI_EN:
		if (kvm_lapic_enable_pv_eoi(vcpu, data))
			return 1;
		break;
G
Glauber Costa 已提交
2452

H
Huang Ying 已提交
2453 2454
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
2455
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2456
		return set_msr_mce(vcpu, msr_info);
2457

2458 2459 2460 2461 2462
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
		pr = true; /* fall through */
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2463
		if (kvm_pmu_is_valid_msr(vcpu, msr))
2464
			return kvm_pmu_set_msr(vcpu, msr_info);
2465 2466

		if (pr || data != 0)
2467 2468
			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
				    "0x%x data 0x%llx\n", msr, data);
2469
		break;
2470 2471 2472 2473 2474
	case MSR_K7_CLK_CTL:
		/*
		 * Ignore all writes to this no longer documented MSR.
		 * Writes are only relevant for old K7 processors,
		 * all pre-dating SVM, but a recommended workaround from
G
Guo Chao 已提交
2475
		 * AMD for these chips. It is possible to specify the
2476 2477 2478 2479
		 * affected processor models on the command line, hence
		 * the need to ignore the workaround.
		 */
		break;
2480
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2481 2482
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
A
Andrey Smetanin 已提交
2483
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2484 2485 2486
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
2487 2488
		return kvm_hv_set_msr_common(vcpu, msr, data,
					     msr_info->host_initiated);
2489 2490 2491 2492
	case MSR_IA32_BBL_CR_CTL3:
		/* Drop writes to this legacy MSR -- see rdmsr
		 * counterpart for further detail.
		 */
2493 2494 2495
		if (report_ignored_msrs)
			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
				msr, data);
2496
		break;
2497
	case MSR_AMD64_OSVW_ID_LENGTH:
2498
		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2499 2500 2501 2502
			return 1;
		vcpu->arch.osvw.length = data;
		break;
	case MSR_AMD64_OSVW_STATUS:
2503
		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2504 2505 2506
			return 1;
		vcpu->arch.osvw.status = data;
		break;
K
Kyle Huey 已提交
2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521
	case MSR_PLATFORM_INFO:
		if (!msr_info->host_initiated ||
		    data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
		    (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
		     cpuid_fault_enabled(vcpu)))
			return 1;
		vcpu->arch.msr_platform_info = data;
		break;
	case MSR_MISC_FEATURES_ENABLES:
		if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
		    (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
		     !supports_cpuid_fault(vcpu)))
			return 1;
		vcpu->arch.msr_misc_features_enables = data;
		break;
2522
	default:
E
Ed Swierk 已提交
2523 2524
		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
			return xen_hvm_config(vcpu, data);
2525
		if (kvm_pmu_is_valid_msr(vcpu, msr))
2526
			return kvm_pmu_set_msr(vcpu, msr_info);
2527
		if (!ignore_msrs) {
2528
			vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
2529
				    msr, data);
2530 2531
			return 1;
		} else {
2532 2533 2534 2535
			if (report_ignored_msrs)
				vcpu_unimpl(vcpu,
					"ignored wrmsr: 0x%x data 0x%llx\n",
					msr, data);
2536 2537
			break;
		}
2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
2549
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2550
{
2551
	return kvm_x86_ops->get_msr(vcpu, msr);
2552
}
2553
EXPORT_SYMBOL_GPL(kvm_get_msr);
2554

2555
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
2556 2557
{
	u64 data;
H
Huang Ying 已提交
2558 2559
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
2560 2561 2562 2563

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
H
Huang Ying 已提交
2564 2565
		data = 0;
		break;
2566
	case MSR_IA32_MCG_CAP:
H
Huang Ying 已提交
2567 2568
		data = vcpu->arch.mcg_cap;
		break;
2569
	case MSR_IA32_MCG_CTL:
2570
		if (!(mcg_cap & MCG_CTL_P) && !host)
H
Huang Ying 已提交
2571 2572 2573 2574 2575 2576 2577 2578
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
2579
		    msr < MSR_IA32_MCx_CTL(bank_num)) {
H
Huang Ying 已提交
2580 2581 2582 2583 2584 2585 2586 2587 2588 2589
			u32 offset = msr - MSR_IA32_MC0_CTL;
			data = vcpu->arch.mce_banks[offset];
			break;
		}
		return 1;
	}
	*pdata = data;
	return 0;
}

2590
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
H
Huang Ying 已提交
2591
{
2592
	switch (msr_info->index) {
H
Huang Ying 已提交
2593
	case MSR_IA32_PLATFORM_ID:
2594
	case MSR_IA32_EBL_CR_POWERON:
2595 2596 2597 2598 2599
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
2600
	case MSR_K8_SYSCFG:
2601 2602
	case MSR_K8_TSEG_ADDR:
	case MSR_K8_TSEG_MASK:
2603
	case MSR_K7_HWCR:
2604
	case MSR_VM_HSAVE_PA:
2605
	case MSR_K8_INT_PENDING_MSG:
2606
	case MSR_AMD64_NB_CFG:
2607
	case MSR_FAM10H_MMIO_CONF_BASE:
2608
	case MSR_AMD64_BU_CFG2:
D
Dmitry Bilunov 已提交
2609
	case MSR_IA32_PERF_CTL:
2610
	case MSR_AMD64_DC_CFG:
2611
		msr_info->data = 0;
2612
		break;
2613
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
2614 2615 2616 2617
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
2618
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2619 2620
			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
		msr_info->data = 0;
2621
		break;
2622
	case MSR_IA32_UCODE_REV:
2623
		msr_info->data = vcpu->arch.microcode_version;
2624
		break;
2625 2626 2627
	case MSR_IA32_TSC:
		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
		break;
A
Avi Kivity 已提交
2628 2629
	case MSR_MTRRcap:
	case 0x200 ... 0x2ff:
2630
		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
2631
	case 0xcd: /* fsb frequency */
2632
		msr_info->data = 3;
2633
		break;
2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645
		/*
		 * MSR_EBC_FREQUENCY_ID
		 * Conservative value valid for even the basic CPU models.
		 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
		 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
		 * and 266MHz for model 3, or 4. Set Core Clock
		 * Frequency to System Bus Frequency Ratio to 1 (bits
		 * 31:24) even though these are only valid for CPU
		 * models > 2, however guests may end up dividing or
		 * multiplying by zero otherwise.
		 */
	case MSR_EBC_FREQUENCY_ID:
2646
		msr_info->data = 1 << 24;
2647
		break;
2648
	case MSR_IA32_APICBASE:
2649
		msr_info->data = kvm_get_apic_base(vcpu);
2650
		break;
G
Gleb Natapov 已提交
2651
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
2652
		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
G
Gleb Natapov 已提交
2653
		break;
2654
	case MSR_IA32_TSCDEADLINE:
2655
		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
2656
		break;
W
Will Auld 已提交
2657
	case MSR_IA32_TSC_ADJUST:
2658
		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
W
Will Auld 已提交
2659
		break;
2660
	case MSR_IA32_MISC_ENABLE:
2661
		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
2662
		break;
P
Paolo Bonzini 已提交
2663 2664 2665 2666
	case MSR_IA32_SMBASE:
		if (!msr_info->host_initiated)
			return 1;
		msr_info->data = vcpu->arch.smbase;
2667
		break;
2668 2669 2670
	case MSR_SMI_COUNT:
		msr_info->data = vcpu->arch.smi_count;
		break;
2671 2672
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
2673
		msr_info->data = 1000ULL;
2674
		/* CPU multiplier */
2675
		msr_info->data |= (((uint64_t)4ULL) << 40);
2676
		break;
2677
	case MSR_EFER:
2678
		msr_info->data = vcpu->arch.efer;
2679
		break;
2680
	case MSR_KVM_WALL_CLOCK:
2681
	case MSR_KVM_WALL_CLOCK_NEW:
2682
		msr_info->data = vcpu->kvm->arch.wall_clock;
2683 2684
		break;
	case MSR_KVM_SYSTEM_TIME:
2685
	case MSR_KVM_SYSTEM_TIME_NEW:
2686
		msr_info->data = vcpu->arch.time;
2687
		break;
2688
	case MSR_KVM_ASYNC_PF_EN:
2689
		msr_info->data = vcpu->arch.apf.msr_val;
2690
		break;
G
Glauber Costa 已提交
2691
	case MSR_KVM_STEAL_TIME:
2692
		msr_info->data = vcpu->arch.st.msr_val;
G
Glauber Costa 已提交
2693
		break;
2694
	case MSR_KVM_PV_EOI_EN:
2695
		msr_info->data = vcpu->arch.pv_eoi.msr_val;
2696
		break;
H
Huang Ying 已提交
2697 2698 2699 2700 2701
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
2702
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2703 2704
		return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
				   msr_info->host_initiated);
2705 2706 2707 2708 2709 2710 2711 2712 2713 2714
	case MSR_K7_CLK_CTL:
		/*
		 * Provide expected ramp-up count for K7. All other
		 * are set to zero, indicating minimum divisors for
		 * every field.
		 *
		 * This prevents guest kernels on AMD host with CPU
		 * type 6, model 8 and higher from exploding due to
		 * the rdmsr failing.
		 */
2715
		msr_info->data = 0x20000000;
2716
		break;
2717
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
2718 2719
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
A
Andrey Smetanin 已提交
2720
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
2721 2722 2723
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
2724
		return kvm_hv_get_msr_common(vcpu,
2725 2726
					     msr_info->index, &msr_info->data,
					     msr_info->host_initiated);
2727
		break;
2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738
	case MSR_IA32_BBL_CR_CTL3:
		/* This legacy MSR exists but isn't fully documented in current
		 * silicon.  It is however accessed by winxp in very narrow
		 * scenarios where it sets bit #19, itself documented as
		 * a "reserved" bit.  Best effort attempt to source coherent
		 * read data here should the balance of the register be
		 * interpreted by the guest:
		 *
		 * L2 cache control register 3: 64GB range, 256KB size,
		 * enabled, latency 0x1, configured
		 */
2739
		msr_info->data = 0xbe702111;
2740
		break;
2741
	case MSR_AMD64_OSVW_ID_LENGTH:
2742
		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2743
			return 1;
2744
		msr_info->data = vcpu->arch.osvw.length;
2745 2746
		break;
	case MSR_AMD64_OSVW_STATUS:
2747
		if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
2748
			return 1;
2749
		msr_info->data = vcpu->arch.osvw.status;
2750
		break;
K
Kyle Huey 已提交
2751 2752 2753 2754 2755 2756
	case MSR_PLATFORM_INFO:
		msr_info->data = vcpu->arch.msr_platform_info;
		break;
	case MSR_MISC_FEATURES_ENABLES:
		msr_info->data = vcpu->arch.msr_misc_features_enables;
		break;
2757
	default:
2758
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
2759
			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
2760
		if (!ignore_msrs) {
2761 2762
			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
					       msr_info->index);
2763 2764
			return 1;
		} else {
2765 2766 2767
			if (report_ignored_msrs)
				vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
					msr_info->index);
2768
			msr_info->data = 0;
2769 2770
		}
		break;
2771 2772 2773 2774 2775
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

2776 2777 2778 2779 2780 2781 2782 2783 2784 2785
/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
2786
	int i;
2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2819 2820 2821
	entries = memdup_user(user_msrs->entries, size);
	if (IS_ERR(entries)) {
		r = PTR_ERR(entries);
2822
		goto out;
2823
	}
2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
2836
	kfree(entries);
2837 2838 2839 2840
out:
	return r;
}

2841 2842 2843
static inline bool kvm_can_mwait_in_guest(void)
{
	return boot_cpu_has(X86_FEATURE_MWAIT) &&
2844 2845
		!boot_cpu_has_bug(X86_BUG_MONITOR) &&
		boot_cpu_has(X86_FEATURE_ARAT);
2846 2847
}

2848
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2849
{
2850
	int r = 0;
2851 2852 2853 2854 2855 2856

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
2857
	case KVM_CAP_EXT_CPUID:
B
Borislav Petkov 已提交
2858
	case KVM_CAP_EXT_EMUL_CPUID:
2859
	case KVM_CAP_CLOCKSOURCE:
S
Sheng Yang 已提交
2860
	case KVM_CAP_PIT:
2861
	case KVM_CAP_NOP_IO_DELAY:
2862
	case KVM_CAP_MP_STATE:
2863
	case KVM_CAP_SYNC_MMU:
2864
	case KVM_CAP_USER_NMI:
2865
	case KVM_CAP_REINJECT_CONTROL:
2866
	case KVM_CAP_IRQ_INJECT_STATUS:
G
Gregory Haskins 已提交
2867
	case KVM_CAP_IOEVENTFD:
2868
	case KVM_CAP_IOEVENTFD_NO_LENGTH:
2869
	case KVM_CAP_PIT2:
B
Beth Kon 已提交
2870
	case KVM_CAP_PIT_STATE2:
2871
	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
E
Ed Swierk 已提交
2872
	case KVM_CAP_XEN_HVM:
J
Jan Kiszka 已提交
2873
	case KVM_CAP_VCPU_EVENTS:
2874
	case KVM_CAP_HYPERV:
G
Gleb Natapov 已提交
2875
	case KVM_CAP_HYPERV_VAPIC:
2876
	case KVM_CAP_HYPERV_SPIN:
2877
	case KVM_CAP_HYPERV_SYNIC:
2878
	case KVM_CAP_HYPERV_SYNIC2:
2879
	case KVM_CAP_HYPERV_VP_INDEX:
2880
	case KVM_CAP_HYPERV_EVENTFD:
2881
	case KVM_CAP_HYPERV_TLBFLUSH:
2882
	case KVM_CAP_PCI_SEGMENT:
2883
	case KVM_CAP_DEBUGREGS:
2884
	case KVM_CAP_X86_ROBUST_SINGLESTEP:
2885
	case KVM_CAP_XSAVE:
2886
	case KVM_CAP_ASYNC_PF:
2887
	case KVM_CAP_GET_TSC_KHZ:
2888
	case KVM_CAP_KVMCLOCK_CTRL:
X
Xiao Guangrong 已提交
2889
	case KVM_CAP_READONLY_MEM:
2890
	case KVM_CAP_HYPERV_TIME:
2891
	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
2892
	case KVM_CAP_TSC_DEADLINE_TIMER:
2893 2894
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_DISABLE_QUIRKS:
2895
	case KVM_CAP_SET_BOOT_CPU_ID:
2896
 	case KVM_CAP_SPLIT_IRQCHIP:
2897
	case KVM_CAP_IMMEDIATE_EXIT:
2898
	case KVM_CAP_GET_MSR_FEATURES:
2899 2900
		r = 1;
		break;
K
Ken Hofsass 已提交
2901 2902 2903
	case KVM_CAP_SYNC_REGS:
		r = KVM_SYNC_X86_VALID_FIELDS;
		break;
2904 2905 2906
	case KVM_CAP_ADJUST_CLOCK:
		r = KVM_CLOCK_TSC_STABLE;
		break;
2907
	case KVM_CAP_X86_DISABLE_EXITS:
M
Michael S. Tsirkin 已提交
2908
		r |=  KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE;
2909 2910
		if(kvm_can_mwait_in_guest())
			r |= KVM_X86_DISABLE_EXITS_MWAIT;
2911
		break;
2912 2913 2914 2915 2916 2917 2918 2919 2920
	case KVM_CAP_X86_SMM:
		/* SMBASE is usually relocated above 1M on modern chipsets,
		 * and SMM handlers might indeed rely on 4G segment limits,
		 * so do not report SMM to be available if real mode is
		 * emulated via vm86 mode.  Still, do not go to great lengths
		 * to avoid userspace's usage of the feature, because it is a
		 * fringe case that is not enabled except via specific settings
		 * of the module parameters.
		 */
2921
		r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
2922
		break;
2923 2924 2925
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
2926
	case KVM_CAP_NR_VCPUS:
2927 2928 2929
		r = KVM_SOFT_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPUS:
2930 2931
		r = KVM_MAX_VCPUS;
		break;
2932
	case KVM_CAP_NR_MEMSLOTS:
2933
		r = KVM_USER_MEM_SLOTS;
2934
		break;
2935 2936
	case KVM_CAP_PV_MMU:	/* obsolete */
		r = 0;
2937
		break;
H
Huang Ying 已提交
2938 2939 2940
	case KVM_CAP_MCE:
		r = KVM_MAX_MCE_BANKS;
		break;
2941
	case KVM_CAP_XCRS:
2942
		r = boot_cpu_has(X86_FEATURE_XSAVE);
2943
		break;
2944 2945 2946
	case KVM_CAP_TSC_CONTROL:
		r = kvm_has_tsc_control;
		break;
2947 2948 2949
	case KVM_CAP_X2APIC_API:
		r = KVM_X2APIC_API_VALID_FLAGS;
		break;
2950 2951 2952 2953 2954 2955 2956
	default:
		break;
	}
	return r;

}

2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
2973
		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
2974 2975 2976
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
J
Jan Kiszka 已提交
2977
		if (n < msr_list.nmsrs)
2978 2979 2980 2981 2982
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
J
Jan Kiszka 已提交
2983
		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2984
				 &emulated_msrs,
2985
				 num_emulated_msrs * sizeof(u32)))
2986 2987 2988 2989
			goto out;
		r = 0;
		break;
	}
B
Borislav Petkov 已提交
2990 2991
	case KVM_GET_SUPPORTED_CPUID:
	case KVM_GET_EMULATED_CPUID: {
2992 2993 2994 2995 2996 2997
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
B
Borislav Petkov 已提交
2998 2999 3000

		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
					    ioctl);
3001 3002 3003 3004 3005 3006 3007 3008 3009
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
H
Huang Ying 已提交
3010 3011
	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
		r = -EFAULT;
3012 3013
		if (copy_to_user(argp, &kvm_mce_cap_supported,
				 sizeof(kvm_mce_cap_supported)))
H
Huang Ying 已提交
3014 3015 3016
			goto out;
		r = 0;
		break;
3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041
	case KVM_GET_MSR_FEATURE_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned int n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msr_based_features;
		if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
			goto out;
		r = -E2BIG;
		if (n < msr_list.nmsrs)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msr_based_features,
				 num_msr_based_features * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(NULL, argp, do_get_msr_feature, 1);
		break;
H
Huang Ying 已提交
3042
	}
3043 3044 3045 3046 3047 3048 3049
	default:
		r = -EINVAL;
	}
out:
	return r;
}

3050 3051 3052 3053 3054 3055 3056
static void wbinvd_ipi(void *garbage)
{
	wbinvd();
}

static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
3057
	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
3058 3059
}

3060 3061
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
3062 3063 3064 3065 3066 3067 3068 3069 3070
	/* Address WBINVD may be executed by guest */
	if (need_emulate_wbinvd(vcpu)) {
		if (kvm_x86_ops->has_wbinvd_exit())
			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
			smp_call_function_single(vcpu->cpu,
					wbinvd_ipi, NULL, 1);
	}

3071
	kvm_x86_ops->vcpu_load(vcpu, cpu);
3072

3073 3074 3075 3076
	/* Apply any externally detected TSC adjustments (due to suspend) */
	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
		vcpu->arch.tsc_offset_adjustment = 0;
3077
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3078
	}
3079

3080
	if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
3081
		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
3082
				rdtsc() - vcpu->arch.last_host_tsc;
Z
Zachary Amsden 已提交
3083 3084
		if (tsc_delta < 0)
			mark_tsc_unstable("KVM discovered backwards TSC");
3085

3086
		if (kvm_check_tsc_unstable()) {
3087
			u64 offset = kvm_compute_tsc_offset(vcpu,
3088
						vcpu->arch.last_guest_tsc);
3089
			kvm_vcpu_write_tsc_offset(vcpu, offset);
Z
Zachary Amsden 已提交
3090 3091
			vcpu->arch.tsc_catchup = 1;
		}
3092 3093 3094 3095

		if (kvm_lapic_hv_timer_in_use(vcpu))
			kvm_lapic_restart_hv_timer(vcpu);

3096 3097 3098 3099 3100
		/*
		 * On a host with synchronized TSC, there is no need to update
		 * kvmclock on vcpu->cpu migration
		 */
		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
3101
			kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
Z
Zachary Amsden 已提交
3102
		if (vcpu->cpu != cpu)
3103
			kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
Z
Zachary Amsden 已提交
3104
		vcpu->cpu = cpu;
Z
Zachary Amsden 已提交
3105
	}
G
Glauber Costa 已提交
3106 3107

	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
3108 3109
}

3110 3111 3112 3113 3114
static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

W
Wanpeng Li 已提交
3115
	vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
3116

3117
	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
3118 3119 3120 3121 3122
			&vcpu->arch.st.steal.preempted,
			offsetof(struct kvm_steal_time, preempted),
			sizeof(vcpu->arch.st.steal.preempted));
}

3123 3124
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
3125
	int idx;
3126 3127 3128 3129

	if (vcpu->preempted)
		vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);

3130 3131 3132 3133 3134 3135 3136 3137 3138
	/*
	 * Disable page faults because we're in atomic context here.
	 * kvm_write_guest_offset_cached() would call might_fault()
	 * that relies on pagefault_disable() to tell if there's a
	 * bug. NOTE: the write to guest memory may not go through if
	 * during postcopy live migration or if there's heavy guest
	 * paging.
	 */
	pagefault_disable();
3139 3140 3141 3142 3143
	/*
	 * kvm_memslots() will be called by
	 * kvm_write_guest_offset_cached() so take the srcu lock.
	 */
	idx = srcu_read_lock(&vcpu->kvm->srcu);
3144
	kvm_steal_time_set_preempted(vcpu);
3145
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3146
	pagefault_enable();
3147
	kvm_x86_ops->vcpu_put(vcpu);
3148
	vcpu->arch.last_host_tsc = rdtsc();
3149 3150 3151 3152 3153 3154
	/*
	 * If userspace has set any breakpoints or watchpoints, dr6 is restored
	 * on every vmexit, but if not, we might have a stale dr6 from the
	 * guest. do_debug expects dr6 to be cleared after it runs, do the same.
	 */
	set_debugreg(0, 6);
3155 3156 3157 3158 3159
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
3160
	if (vcpu->arch.apicv_active)
3161 3162
		kvm_x86_ops->sync_pir_to_irr(vcpu);

3163
	return kvm_apic_get_state(vcpu, s);
3164 3165 3166 3167 3168
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
3169 3170 3171 3172 3173
	int r;

	r = kvm_apic_set_state(vcpu, s);
	if (r)
		return r;
3174
	update_cr8_intercept(vcpu);
3175 3176 3177 3178

	return 0;
}

3179 3180 3181 3182 3183 3184
static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{
	return (!lapic_in_kernel(vcpu) ||
		kvm_apic_accept_pic_intr(vcpu));
}

3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198
/*
 * if userspace requested an interrupt window, check that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
	return kvm_arch_interrupt_allowed(vcpu) &&
		!kvm_cpu_has_interrupt(vcpu) &&
		!kvm_event_needs_reinjection(vcpu) &&
		kvm_cpu_accept_dm_intr(vcpu);
}

3199 3200 3201
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
3202
	if (irq->irq >= KVM_NR_INTERRUPTS)
3203
		return -EINVAL;
3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215

	if (!irqchip_in_kernel(vcpu->kvm)) {
		kvm_queue_interrupt(vcpu, irq->irq, false);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		return 0;
	}

	/*
	 * With in-kernel LAPIC, we only use this to inject EXTINT, so
	 * fail for in-kernel 8259.
	 */
	if (pic_in_kernel(vcpu->kvm))
3216 3217
		return -ENXIO;

3218 3219
	if (vcpu->arch.pending_external_vector != -1)
		return -EEXIST;
3220

3221
	vcpu->arch.pending_external_vector = irq->irq;
3222
	kvm_make_request(KVM_REQ_EVENT, vcpu);
3223 3224 3225
	return 0;
}

3226 3227 3228 3229 3230 3231 3232
static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	kvm_inject_nmi(vcpu);

	return 0;
}

3233 3234
static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
{
P
Paolo Bonzini 已提交
3235 3236
	kvm_make_request(KVM_REQ_SMI, vcpu);

3237 3238 3239
	return 0;
}

3240 3241 3242 3243 3244 3245 3246 3247 3248
static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

H
Huang Ying 已提交
3249 3250 3251 3252 3253 3254 3255
static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
					u64 mcg_cap)
{
	int r;
	unsigned bank_num = mcg_cap & 0xff, bank;

	r = -EINVAL;
3256
	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
H
Huang Ying 已提交
3257
		goto out;
3258
	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
H
Huang Ying 已提交
3259 3260 3261 3262 3263 3264 3265 3266 3267
		goto out;
	r = 0;
	vcpu->arch.mcg_cap = mcg_cap;
	/* Init IA32_MCG_CTL to all 1s */
	if (mcg_cap & MCG_CTL_P)
		vcpu->arch.mcg_ctl = ~(u64)0;
	/* Init IA32_MCi_CTL to all 1s */
	for (bank = 0; bank < bank_num; bank++)
		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
3268 3269 3270

	if (kvm_x86_ops->setup_mce)
		kvm_x86_ops->setup_mce(vcpu);
H
Huang Ying 已提交
3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299
out:
	return r;
}

static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
				      struct kvm_x86_mce *mce)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
	u64 *banks = vcpu->arch.mce_banks;

	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
		return -EINVAL;
	/*
	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
	 * reporting is disabled
	 */
	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
	    vcpu->arch.mcg_ctl != ~(u64)0)
		return 0;
	banks += 4 * mce->bank;
	/*
	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
	 * reporting is disabled for the bank
	 */
	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
		return 0;
	if (mce->status & MCI_STATUS_UC) {
		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
3300
		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
3301
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
H
Huang Ying 已提交
3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322
			return 0;
		}
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		vcpu->arch.mcg_status = mce->mcg_status;
		banks[1] = mce->status;
		kvm_queue_exception(vcpu, MC_VECTOR);
	} else if (!(banks[1] & MCI_STATUS_VAL)
		   || !(banks[1] & MCI_STATUS_UC)) {
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		banks[1] = mce->status;
	} else
		banks[1] |= MCI_STATUS_OVER;
	return 0;
}

J
Jan Kiszka 已提交
3323 3324 3325
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
					       struct kvm_vcpu_events *events)
{
A
Avi Kivity 已提交
3326
	process_nmi(vcpu);
3327 3328 3329 3330 3331
	/*
	 * FIXME: pass injected and pending separately.  This is only
	 * needed for nested virtualization, whose state cannot be
	 * migrated yet.  For now we can combine them.
	 */
3332
	events->exception.injected =
3333 3334
		(vcpu->arch.exception.pending ||
		 vcpu->arch.exception.injected) &&
3335
		!kvm_exception_is_soft(vcpu->arch.exception.nr);
J
Jan Kiszka 已提交
3336 3337
	events->exception.nr = vcpu->arch.exception.nr;
	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
3338
	events->exception.pad = 0;
J
Jan Kiszka 已提交
3339 3340
	events->exception.error_code = vcpu->arch.exception.error_code;

3341
	events->interrupt.injected =
3342
		vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
J
Jan Kiszka 已提交
3343
	events->interrupt.nr = vcpu->arch.interrupt.nr;
3344
	events->interrupt.soft = 0;
3345
	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
J
Jan Kiszka 已提交
3346 3347

	events->nmi.injected = vcpu->arch.nmi_injected;
A
Avi Kivity 已提交
3348
	events->nmi.pending = vcpu->arch.nmi_pending != 0;
J
Jan Kiszka 已提交
3349
	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
3350
	events->nmi.pad = 0;
J
Jan Kiszka 已提交
3351

3352
	events->sipi_vector = 0; /* never valid when reporting to user space */
J
Jan Kiszka 已提交
3353

3354 3355 3356 3357 3358 3359
	events->smi.smm = is_smm(vcpu);
	events->smi.pending = vcpu->arch.smi_pending;
	events->smi.smm_inside_nmi =
		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
	events->smi.latched_init = kvm_lapic_latched_init(vcpu);

3360
	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
3361 3362
			 | KVM_VCPUEVENT_VALID_SHADOW
			 | KVM_VCPUEVENT_VALID_SMM);
3363
	memset(&events->reserved, 0, sizeof(events->reserved));
J
Jan Kiszka 已提交
3364 3365
}

3366 3367
static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);

J
Jan Kiszka 已提交
3368 3369 3370
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
					      struct kvm_vcpu_events *events)
{
3371
	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
3372
			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
3373 3374
			      | KVM_VCPUEVENT_VALID_SHADOW
			      | KVM_VCPUEVENT_VALID_SMM))
J
Jan Kiszka 已提交
3375 3376
		return -EINVAL;

3377
	if (events->exception.injected &&
3378 3379
	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR ||
	     is_guest_mode(vcpu)))
3380 3381
		return -EINVAL;

3382 3383 3384 3385 3386 3387
	/* INITs are latched while in SMM */
	if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
	    (events->smi.smm || events->smi.pending) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
		return -EINVAL;

A
Avi Kivity 已提交
3388
	process_nmi(vcpu);
3389
	vcpu->arch.exception.injected = false;
J
Jan Kiszka 已提交
3390 3391 3392 3393 3394
	vcpu->arch.exception.pending = events->exception.injected;
	vcpu->arch.exception.nr = events->exception.nr;
	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
	vcpu->arch.exception.error_code = events->exception.error_code;

3395
	vcpu->arch.interrupt.injected = events->interrupt.injected;
J
Jan Kiszka 已提交
3396 3397
	vcpu->arch.interrupt.nr = events->interrupt.nr;
	vcpu->arch.interrupt.soft = events->interrupt.soft;
3398 3399 3400
	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
		kvm_x86_ops->set_interrupt_shadow(vcpu,
						  events->interrupt.shadow);
J
Jan Kiszka 已提交
3401 3402

	vcpu->arch.nmi_injected = events->nmi.injected;
3403 3404
	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
		vcpu->arch.nmi_pending = events->nmi.pending;
J
Jan Kiszka 已提交
3405 3406
	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);

3407
	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
3408
	    lapic_in_kernel(vcpu))
3409
		vcpu->arch.apic->sipi_vector = events->sipi_vector;
J
Jan Kiszka 已提交
3410

3411
	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
3412
		u32 hflags = vcpu->arch.hflags;
3413
		if (events->smi.smm)
3414
			hflags |= HF_SMM_MASK;
3415
		else
3416 3417 3418
			hflags &= ~HF_SMM_MASK;
		kvm_set_hflags(vcpu, hflags);

3419
		vcpu->arch.smi_pending = events->smi.pending;
3420 3421 3422 3423

		if (events->smi.smm) {
			if (events->smi.smm_inside_nmi)
				vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
3424
			else
3425 3426 3427 3428 3429 3430 3431
				vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
			if (lapic_in_kernel(vcpu)) {
				if (events->smi.latched_init)
					set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
				else
					clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
			}
3432 3433 3434
		}
	}

3435 3436
	kvm_make_request(KVM_REQ_EVENT, vcpu);

J
Jan Kiszka 已提交
3437 3438 3439
	return 0;
}

3440 3441 3442
static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
					     struct kvm_debugregs *dbgregs)
{
J
Jan Kiszka 已提交
3443 3444
	unsigned long val;

3445
	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
3446
	kvm_get_dr(vcpu, 6, &val);
J
Jan Kiszka 已提交
3447
	dbgregs->dr6 = val;
3448 3449
	dbgregs->dr7 = vcpu->arch.dr7;
	dbgregs->flags = 0;
3450
	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
3451 3452 3453 3454 3455 3456 3457 3458
}

static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
					    struct kvm_debugregs *dbgregs)
{
	if (dbgregs->flags)
		return -EINVAL;

3459 3460 3461 3462 3463
	if (dbgregs->dr6 & ~0xffffffffull)
		return -EINVAL;
	if (dbgregs->dr7 & ~0xffffffffull)
		return -EINVAL;

3464
	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
3465
	kvm_update_dr0123(vcpu);
3466
	vcpu->arch.dr6 = dbgregs->dr6;
J
Jan Kiszka 已提交
3467
	kvm_update_dr6(vcpu);
3468
	vcpu->arch.dr7 = dbgregs->dr7;
3469
	kvm_update_dr7(vcpu);
3470 3471 3472 3473

	return 0;
}

#define XSTATE_COMPACTION_ENABLED (1ULL << 63)

static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
{
	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
	u64 xstate_bv = xsave->header.xfeatures;
	u64 valid;

	/*
	 * Copy legacy XSAVE area, to avoid complications with CPUID
	 * leaves 0 and 1 in the loop below.
	 */
	memcpy(dest, xsave, XSAVE_HDR_OFFSET);

	/* Set XSTATE_BV */
	xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;

	/*
	 * Copy each region from the possibly compacted offset to the
	 * non-compacted offset.
	 */
	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
	while (valid) {
		u64 feature = valid & -valid;
		int index = fls64(feature) - 1;
		void *src = get_xsave_addr(xsave, feature);

		if (src) {
			u32 size, offset, ecx, edx;
			cpuid_count(XSTATE_CPUID, index,
				    &size, &offset, &ecx, &edx);
			if (feature == XFEATURE_MASK_PKRU)
				memcpy(dest + offset, &vcpu->arch.pkru,
				       sizeof(vcpu->arch.pkru));
			else
				memcpy(dest + offset, src, size);

		}

		valid -= feature;
	}
}

static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
{
	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
	u64 valid;

	/*
	 * Copy legacy XSAVE area, to avoid complications with CPUID
	 * leaves 0 and 1 in the loop below.
	 */
	memcpy(xsave, src, XSAVE_HDR_OFFSET);

	/* Set XSTATE_BV and possibly XCOMP_BV.  */
	xsave->header.xfeatures = xstate_bv;
	if (boot_cpu_has(X86_FEATURE_XSAVES))
		xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;

	/*
	 * Copy each region from the non-compacted offset to the
	 * possibly compacted offset.
	 */
	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
	while (valid) {
		u64 feature = valid & -valid;
		int index = fls64(feature) - 1;
		void *dest = get_xsave_addr(xsave, feature);

		if (dest) {
			u32 size, offset, ecx, edx;
			cpuid_count(XSTATE_CPUID, index,
				    &size, &offset, &ecx, &edx);
			if (feature == XFEATURE_MASK_PKRU)
				memcpy(&vcpu->arch.pkru, src + offset,
				       sizeof(vcpu->arch.pkru));
			else
				memcpy(dest, src + offset, size);
		}

		valid -= feature;
	}
}

static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
					 struct kvm_xsave *guest_xsave)
{
	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
		memset(guest_xsave, 0, sizeof(struct kvm_xsave));
		fill_xsave((u8 *) guest_xsave->region, vcpu);
	} else {
		memcpy(guest_xsave->region,
			&vcpu->arch.guest_fpu.state.fxsave,
			sizeof(struct fxregs_state));
		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
			XFEATURE_MASK_FPSSE;
	}
}

#define XSAVE_MXCSR_OFFSET 24

static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
					struct kvm_xsave *guest_xsave)
{
	u64 xstate_bv =
		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
	u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];

	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
		/*
		 * Here we allow setting states that are not present in
		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
		 * with old userspace.
		 */
		if (xstate_bv & ~kvm_supported_xcr0() ||
			mxcsr & ~mxcsr_feature_mask)
			return -EINVAL;
		load_xsave(vcpu, (u8 *)guest_xsave->region);
	} else {
		if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
			mxcsr & ~mxcsr_feature_mask)
			return -EINVAL;
		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
			guest_xsave->region, sizeof(struct fxregs_state));
	}
	return 0;
}
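
/*
 * Illustrative only, not part of the kernel source: a minimal userspace
 * sketch of the KVM_GET_XSAVE/KVM_SET_XSAVE round trip served by the two
 * helpers above, as a VMM might do during live migration.  The fd names
 * are assumptions; error handling is omitted.
 *
 *	struct kvm_xsave xsave;
 *
 *	ioctl(src_vcpu_fd, KVM_GET_XSAVE, &xsave);
 *	... transfer the 4 KiB kvm_xsave blob to the destination ...
 *	ioctl(dst_vcpu_fd, KVM_SET_XSAVE, &xsave);
 */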

static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
					struct kvm_xcrs *guest_xcrs)
{
	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
		guest_xcrs->nr_xcrs = 0;
		return;
	}

	guest_xcrs->nr_xcrs = 1;
	guest_xcrs->flags = 0;
	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
}

static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
				       struct kvm_xcrs *guest_xcrs)
{
	int i, r = 0;

	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return -EINVAL;

	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
		return -EINVAL;

	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
		/* Only support XCR0 currently */
		if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
				guest_xcrs->xcrs[i].value);
			break;
		}
	if (r)
		r = -EINVAL;
	return r;
}

/*
 * kvm_set_guest_paused() indicates to the guest kernel that it has been
 * stopped by the hypervisor.  This function will be called from the host only.
 * EINVAL is returned when the host attempts to set the flag for a guest that
 * does not support pv clocks.
 */
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pv_time_enabled)
		return -EINVAL;
	vcpu->arch.pvclock_set_guest_stopped_request = true;
	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	return 0;
}
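
/*
 * Illustrative only, not part of the kernel source: the KVM_KVMCLOCK_CTRL
 * ioctl handled above is issued per vCPU by userspace after resuming a
 * guest it had stopped (e.g. under a debugger), so the guest's soft-lockup
 * watchdog can tell a host-induced pause from a real stall.  "vcpu_fd" is
 * an assumption.
 *
 *	ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0);
 *
 * An -EINVAL return means the guest exposes no pvclock area, in which case
 * there is nothing to signal.
 */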

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_HYPERV_SYNIC2:
		if (cap->args[0])
			return -EINVAL;
	case KVM_CAP_HYPERV_SYNIC:
		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;
		return kvm_hv_activate_synic(vcpu, cap->cap ==
					     KVM_CAP_HYPERV_SYNIC2);
	default:
		return -EINVAL;
	}
}

3676 3677 3678 3679 3680 3681
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
3682 3683 3684 3685 3686 3687 3688
	union {
		struct kvm_lapic_state *lapic;
		struct kvm_xsave *xsave;
		struct kvm_xcrs *xcrs;
		void *buffer;
	} u;

3689 3690
	vcpu_load(vcpu);

3691
	u.buffer = NULL;
3692 3693
	switch (ioctl) {
	case KVM_GET_LAPIC: {
3694
		r = -EINVAL;
3695
		if (!lapic_in_kernel(vcpu))
3696
			goto out;
3697
		u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
3698

3699
		r = -ENOMEM;
3700
		if (!u.lapic)
3701
			goto out;
3702
		r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
3703 3704 3705
		if (r)
			goto out;
		r = -EFAULT;
3706
		if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
3707 3708 3709 3710 3711
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
3712
		r = -EINVAL;
3713
		if (!lapic_in_kernel(vcpu))
3714
			goto out;
3715
		u.lapic = memdup_user(argp, sizeof(*u.lapic));
3716 3717 3718 3719
		if (IS_ERR(u.lapic)) {
			r = PTR_ERR(u.lapic);
			goto out_nofree;
		}
3720

3721
		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
3722 3723
		break;
	}
3724 3725 3726 3727 3728 3729 3730 3731 3732
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
3733 3734 3735 3736
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		break;
	}
3737 3738 3739 3740
	case KVM_SMI: {
		r = kvm_vcpu_ioctl_smi(vcpu);
		break;
	}
3741 3742 3743 3744 3745 3746 3747 3748 3749 3750
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		break;
	}
3751 3752 3753 3754 3755 3756 3757 3758
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
3759
					      cpuid_arg->entries);
3760 3761 3762 3763 3764 3765 3766 3767 3768 3769
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
3770
					      cpuid_arg->entries);
3771 3772 3773 3774 3775 3776 3777 3778
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
3779 3780
	case KVM_GET_MSRS: {
		int idx = srcu_read_lock(&vcpu->kvm->srcu);
3781
		r = msr_io(vcpu, argp, do_get_msr, 1);
3782
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
3783
		break;
3784 3785 3786
	}
	case KVM_SET_MSRS: {
		int idx = srcu_read_lock(&vcpu->kvm->srcu);
3787
		r = msr_io(vcpu, argp, do_set_msr, 0);
3788
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
3789
		break;
3790
	}
3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	};
A
Avi Kivity 已提交
3806 3807
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;
3808
		int idx;
A
Avi Kivity 已提交
3809 3810

		r = -EINVAL;
3811
		if (!lapic_in_kernel(vcpu))
A
Avi Kivity 已提交
3812 3813 3814 3815
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
3816
		idx = srcu_read_lock(&vcpu->kvm->srcu);
3817
		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3818
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
A
Avi Kivity 已提交
3819 3820
		break;
	}
H
Huang Ying 已提交
3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838
	case KVM_X86_SETUP_MCE: {
		u64 mcg_cap;

		r = -EFAULT;
		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
			goto out;
		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
		break;
	}
	case KVM_X86_SET_MCE: {
		struct kvm_x86_mce mce;

		r = -EFAULT;
		if (copy_from_user(&mce, argp, sizeof mce))
			goto out;
		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
		break;
	}
J
Jan Kiszka 已提交
3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);

		r = -EFAULT;
		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		r = -EFAULT;
		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
			break;

		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
		break;
	}
3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882
	case KVM_GET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);

		r = -EFAULT;
		if (copy_to_user(argp, &dbgregs,
				 sizeof(struct kvm_debugregs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		r = -EFAULT;
		if (copy_from_user(&dbgregs, argp,
				   sizeof(struct kvm_debugregs)))
			break;

		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
		break;
	}
3883
	case KVM_GET_XSAVE: {
3884
		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
3885
		r = -ENOMEM;
3886
		if (!u.xsave)
3887 3888
			break;

3889
		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
3890 3891

		r = -EFAULT;
3892
		if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
3893 3894 3895 3896 3897
			break;
		r = 0;
		break;
	}
	case KVM_SET_XSAVE: {
3898
		u.xsave = memdup_user(argp, sizeof(*u.xsave));
3899 3900 3901 3902
		if (IS_ERR(u.xsave)) {
			r = PTR_ERR(u.xsave);
			goto out_nofree;
		}
3903

3904
		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
3905 3906 3907
		break;
	}
	case KVM_GET_XCRS: {
3908
		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
3909
		r = -ENOMEM;
3910
		if (!u.xcrs)
3911 3912
			break;

3913
		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
3914 3915

		r = -EFAULT;
3916
		if (copy_to_user(argp, u.xcrs,
3917 3918 3919 3920 3921 3922
				 sizeof(struct kvm_xcrs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_XCRS: {
3923
		u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
3924 3925 3926 3927
		if (IS_ERR(u.xcrs)) {
			r = PTR_ERR(u.xcrs);
			goto out_nofree;
		}
3928

3929
		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
3930 3931
		break;
	}
3932 3933 3934 3935 3936 3937 3938 3939 3940
	case KVM_SET_TSC_KHZ: {
		u32 user_tsc_khz;

		r = -EINVAL;
		user_tsc_khz = (u32)arg;

		if (user_tsc_khz >= kvm_max_guest_tsc_khz)
			goto out;

3941 3942 3943
		if (user_tsc_khz == 0)
			user_tsc_khz = tsc_khz;

3944 3945
		if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
			r = 0;
3946 3947 3948 3949

		goto out;
	}
	case KVM_GET_TSC_KHZ: {
3950
		r = vcpu->arch.virtual_tsc_khz;
3951 3952
		goto out;
	}
3953 3954 3955 3956
	case KVM_KVMCLOCK_CTRL: {
		r = kvm_set_guest_paused(vcpu);
		goto out;
	}
3957 3958 3959 3960 3961 3962 3963 3964 3965
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
3966 3967 3968 3969
	default:
		r = -EINVAL;
	}
out:
3970
	kfree(u.buffer);
3971 3972
out_nofree:
	vcpu_put(vcpu);
3973 3974 3975
	return r;
}

3976
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3977 3978 3979 3980
{
	return VM_FAULT_SIGBUS;
}

3981 3982 3983 3984 3985
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
3986
		return -EINVAL;
3987 3988 3989 3990
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

3991 3992 3993
static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
					      u64 ident_addr)
{
3994
	return kvm_x86_ops->set_identity_map_addr(kvm, ident_addr);
3995 3996
}

3997 3998 3999 4000 4001 4002
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					  u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

4003
	mutex_lock(&kvm->slots_lock);
4004 4005

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
4006
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
4007

4008
	mutex_unlock(&kvm->slots_lock);
4009 4010 4011 4012 4013
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
4014
	return kvm->arch.n_max_mmu_pages;
4015 4016 4017 4018
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
4019
	struct kvm_pic *pic = kvm->arch.vpic;
4020 4021 4022 4023 4024
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
4025
		memcpy(&chip->chip.pic, &pic->pics[0],
4026 4027 4028
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
4029
		memcpy(&chip->chip.pic, &pic->pics[1],
4030 4031 4032
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
4033
		kvm_get_ioapic(kvm, &chip->chip.ioapic);
4034 4035 4036 4037 4038 4039 4040 4041 4042 4043
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
4044
	struct kvm_pic *pic = kvm->arch.vpic;
4045 4046 4047 4048 4049
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
4050 4051
		spin_lock(&pic->lock);
		memcpy(&pic->pics[0], &chip->chip.pic,
4052
			sizeof(struct kvm_pic_state));
4053
		spin_unlock(&pic->lock);
4054 4055
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
4056 4057
		spin_lock(&pic->lock);
		memcpy(&pic->pics[1], &chip->chip.pic,
4058
			sizeof(struct kvm_pic_state));
4059
		spin_unlock(&pic->lock);
4060 4061
		break;
	case KVM_IRQCHIP_IOAPIC:
4062
		kvm_set_ioapic(kvm, &chip->chip.ioapic);
4063 4064 4065 4066 4067
		break;
	default:
		r = -EINVAL;
		break;
	}
4068
	kvm_pic_update_irq(pic);
4069 4070 4071
	return r;
}

4072 4073
static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
4074 4075 4076 4077 4078 4079 4080
	struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;

	BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));

	mutex_lock(&kps->lock);
	memcpy(ps, &kps->channels, sizeof(*ps));
	mutex_unlock(&kps->lock);
4081
	return 0;
4082 4083 4084 4085
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
4086
	int i;
4087 4088 4089
	struct kvm_pit *pit = kvm->arch.vpit;

	mutex_lock(&pit->pit_state.lock);
4090
	memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
4091
	for (i = 0; i < 3; i++)
4092 4093
		kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
	mutex_unlock(&pit->pit_state.lock);
4094
	return 0;
B
Beth Kon 已提交
4095 4096 4097 4098 4099 4100 4101 4102 4103
}

static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
		sizeof(ps->channels));
	ps->flags = kvm->arch.vpit->pit_state.flags;
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
4104
	memset(&ps->reserved, 0, sizeof(ps->reserved));
4105
	return 0;
B
Beth Kon 已提交
4106 4107 4108 4109
}

static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
4110
	int start = 0;
4111
	int i;
B
Beth Kon 已提交
4112
	u32 prev_legacy, cur_legacy;
4113 4114 4115 4116
	struct kvm_pit *pit = kvm->arch.vpit;

	mutex_lock(&pit->pit_state.lock);
	prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
B
Beth Kon 已提交
4117 4118 4119
	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
	if (!prev_legacy && cur_legacy)
		start = 1;
4120 4121 4122
	memcpy(&pit->pit_state.channels, &ps->channels,
	       sizeof(pit->pit_state.channels));
	pit->pit_state.flags = ps->flags;
4123
	for (i = 0; i < 3; i++)
4124
		kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
4125
				   start && i == 0);
4126
	mutex_unlock(&pit->pit_state.lock);
4127
	return 0;
4128 4129
}

4130 4131 4132
static int kvm_vm_ioctl_reinject(struct kvm *kvm,
				 struct kvm_reinject_control *control)
{
4133 4134 4135
	struct kvm_pit *pit = kvm->arch.vpit;

	if (!pit)
4136
		return -ENXIO;
4137

4138 4139 4140 4141 4142 4143 4144
	/* pit->pit_state.lock was overloaded to prevent userspace from getting
	 * an inconsistent state after running multiple KVM_REINJECT_CONTROL
	 * ioctls in parallel.  Use a separate lock if that ioctl isn't rare.
	 */
	mutex_lock(&pit->pit_state.lock);
	kvm_pit_set_reinject(pit, control->pit_reinject);
	mutex_unlock(&pit->pit_state.lock);
4145

4146 4147 4148
	return 0;
}

/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide a general overview of dirty page logging. See
 * the kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
 * always flush the TLB (step 4), even if the previous step failed and the
 * dirty bitmap may be corrupt. Regardless of the previous outcome, the KVM
 * logging API does not preclude a subsequent dirty log read by user space.
 * Flushing the TLB ensures that writes will be marked dirty for the next
 * log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	bool is_dirty = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	/*
	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
	 */
	if (kvm_x86_ops->flush_log_dirty)
		kvm_x86_ops->flush_log_dirty(kvm);

	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);

	/*
	 * All the TLBs can be flushed out of mmu lock, see the comments in
	 * kvm_mmu_slot_remove_write_access().
	 */
	lockdep_assert_held(&kvm->slots_lock);
	if (is_dirty)
		kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->slots_lock);
	return r;
}
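
/*
 * Illustrative only, not part of the kernel source: a minimal userspace
 * sketch of the KVM_GET_DIRTY_LOG ioctl implemented above, as a VMM might
 * use it while iterating memory during live migration.  "vm_fd", "slot"
 * and "mem_size" are assumptions; the bitmap holds one bit per 4 KiB page.
 *
 *	struct kvm_dirty_log log = { .slot = slot };
 *	size_t pages = mem_size / 4096;
 *
 *	log.dirty_bitmap = calloc((pages + 63) / 64, 8);
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *	... re-send every page whose bit is set in log.dirty_bitmap ...
 */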

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_DISABLE_QUIRKS:
		kvm->arch.disabled_quirks = cap->args[0];
		r = 0;
		break;
	case KVM_CAP_SPLIT_IRQCHIP: {
		mutex_lock(&kvm->lock);
		r = -EINVAL;
		if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
			goto split_irqchip_unlock;
		r = -EEXIST;
		if (irqchip_in_kernel(kvm))
			goto split_irqchip_unlock;
		if (kvm->created_vcpus)
			goto split_irqchip_unlock;
		r = kvm_setup_empty_irq_routing(kvm);
		if (r)
			goto split_irqchip_unlock;
		/* Pairs with irqchip_in_kernel. */
		smp_wmb();
		kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
		kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
		r = 0;
split_irqchip_unlock:
		mutex_unlock(&kvm->lock);
		break;
	}
	case KVM_CAP_X2APIC_API:
		r = -EINVAL;
		if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
			break;

		if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
			kvm->arch.x2apic_format = true;
		if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
			kvm->arch.x2apic_broadcast_quirk_disabled = true;

		r = 0;
		break;
	case KVM_CAP_X86_DISABLE_EXITS:
		r = -EINVAL;
		if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
			break;

		if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
			kvm_can_mwait_in_guest())
			kvm->arch.mwait_in_guest = true;
		if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
			kvm->arch.hlt_in_guest = true;
		if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
			kvm->arch.pause_in_guest = true;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
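
/*
 * Illustrative only, not part of the kernel source: a hedged userspace
 * sketch of enabling one of the VM-wide capabilities handled above, here
 * KVM_CAP_SPLIT_IRQCHIP with 24 reserved IOAPIC routes.  This has to be
 * issued on the VM descriptor before any vCPU is created; "vm_fd" is an
 * assumption.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_SPLIT_IRQCHIP,
 *		.args[0] = 24,
 *	};
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */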

4275 4276 4277 4278 4279
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
4280
	int r = -ENOTTY;
4281 4282 4283 4284 4285 4286 4287
	/*
	 * This union makes it completely explicit to gcc-3.x
	 * that these two variables' stack usage should be
	 * combined, not added together.
	 */
	union {
		struct kvm_pit_state ps;
B
Beth Kon 已提交
4288
		struct kvm_pit_state2 ps2;
4289
		struct kvm_pit_config pit_config;
4290
	} u;
4291 4292 4293 4294 4295

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		break;
4296 4297 4298
	case KVM_SET_IDENTITY_MAP_ADDR: {
		u64 ident_addr;

4299 4300 4301 4302
		mutex_lock(&kvm->lock);
		r = -EINVAL;
		if (kvm->created_vcpus)
			goto set_identity_unlock;
4303 4304
		r = -EFAULT;
		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
4305
			goto set_identity_unlock;
4306
		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
4307 4308
set_identity_unlock:
		mutex_unlock(&kvm->lock);
4309 4310
		break;
	}
4311 4312 4313 4314 4315 4316
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
4317 4318
	case KVM_CREATE_IRQCHIP: {
		mutex_lock(&kvm->lock);
4319

4320
		r = -EEXIST;
4321
		if (irqchip_in_kernel(kvm))
4322
			goto create_irqchip_unlock;
4323

4324
		r = -EINVAL;
P
Paolo Bonzini 已提交
4325
		if (kvm->created_vcpus)
4326
			goto create_irqchip_unlock;
4327 4328 4329

		r = kvm_pic_init(kvm);
		if (r)
4330
			goto create_irqchip_unlock;
4331 4332 4333 4334

		r = kvm_ioapic_init(kvm);
		if (r) {
			kvm_pic_destroy(kvm);
4335
			goto create_irqchip_unlock;
4336 4337
		}

4338 4339
		r = kvm_setup_default_irq_routing(kvm);
		if (r) {
4340
			kvm_ioapic_destroy(kvm);
4341
			kvm_pic_destroy(kvm);
4342
			goto create_irqchip_unlock;
4343
		}
4344
		/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
4345
		smp_wmb();
4346
		kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
4347 4348
	create_irqchip_unlock:
		mutex_unlock(&kvm->lock);
4349
		break;
4350
	}
S
Sheng Yang 已提交
4351
	case KVM_CREATE_PIT:
4352 4353 4354 4355 4356 4357 4358 4359
		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
		goto create_pit;
	case KVM_CREATE_PIT2:
		r = -EFAULT;
		if (copy_from_user(&u.pit_config, argp,
				   sizeof(struct kvm_pit_config)))
			goto out;
	create_pit:
4360
		mutex_lock(&kvm->lock);
A
Avi Kivity 已提交
4361 4362 4363
		r = -EEXIST;
		if (kvm->arch.vpit)
			goto create_pit_unlock;
S
Sheng Yang 已提交
4364
		r = -ENOMEM;
4365
		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
S
Sheng Yang 已提交
4366 4367
		if (kvm->arch.vpit)
			r = 0;
A
Avi Kivity 已提交
4368
	create_pit_unlock:
4369
		mutex_unlock(&kvm->lock);
S
Sheng Yang 已提交
4370
		break;
4371 4372
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
4373
		struct kvm_irqchip *chip;
4374

4375 4376 4377
		chip = memdup_user(argp, sizeof(*chip));
		if (IS_ERR(chip)) {
			r = PTR_ERR(chip);
4378
			goto out;
4379 4380
		}

4381
		r = -ENXIO;
4382
		if (!irqchip_kernel(kvm))
4383 4384
			goto get_irqchip_out;
		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
4385
		if (r)
4386
			goto get_irqchip_out;
4387
		r = -EFAULT;
4388 4389
		if (copy_to_user(argp, chip, sizeof *chip))
			goto get_irqchip_out;
4390
		r = 0;
4391 4392
	get_irqchip_out:
		kfree(chip);
4393 4394 4395 4396
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
4397
		struct kvm_irqchip *chip;
4398

4399 4400 4401
		chip = memdup_user(argp, sizeof(*chip));
		if (IS_ERR(chip)) {
			r = PTR_ERR(chip);
4402
			goto out;
4403 4404
		}

4405
		r = -ENXIO;
4406
		if (!irqchip_kernel(kvm))
4407 4408
			goto set_irqchip_out;
		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
4409
		if (r)
4410
			goto set_irqchip_out;
4411
		r = 0;
4412 4413
	set_irqchip_out:
		kfree(chip);
4414 4415
		break;
	}
4416 4417
	case KVM_GET_PIT: {
		r = -EFAULT;
4418
		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
4419 4420 4421 4422
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
4423
		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
4424 4425 4426
		if (r)
			goto out;
		r = -EFAULT;
4427
		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
4428 4429 4430 4431 4432 4433
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		r = -EFAULT;
4434
		if (copy_from_user(&u.ps, argp, sizeof u.ps))
4435 4436 4437 4438
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
4439
		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
4440 4441
		break;
	}
B
Beth Kon 已提交
4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464
	case KVM_GET_PIT2: {
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT2: {
		r = -EFAULT;
		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
		break;
	}
4465 4466 4467 4468 4469 4470 4471 4472
	case KVM_REINJECT_CONTROL: {
		struct kvm_reinject_control control;
		r =  -EFAULT;
		if (copy_from_user(&control, argp, sizeof(control)))
			goto out;
		r = kvm_vm_ioctl_reinject(kvm, &control);
		break;
	}
4473 4474 4475
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
P
Paolo Bonzini 已提交
4476
		if (kvm->created_vcpus)
4477 4478 4479 4480 4481
			r = -EBUSY;
		else
			kvm->arch.bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
E
Ed Swierk 已提交
4482
	case KVM_XEN_HVM_CONFIG: {
4483
		struct kvm_xen_hvm_config xhc;
E
Ed Swierk 已提交
4484
		r = -EFAULT;
4485
		if (copy_from_user(&xhc, argp, sizeof(xhc)))
E
Ed Swierk 已提交
4486 4487
			goto out;
		r = -EINVAL;
4488
		if (xhc.flags)
E
Ed Swierk 已提交
4489
			goto out;
4490
		memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
E
Ed Swierk 已提交
4491 4492 4493
		r = 0;
		break;
	}
4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506
	case KVM_SET_CLOCK: {
		struct kvm_clock_data user_ns;
		u64 now_ns;

		r = -EFAULT;
		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
			goto out;

		r = -EINVAL;
		if (user_ns.flags)
			goto out;

		r = 0;
4507 4508 4509 4510 4511 4512
		/*
		 * TODO: userspace has to take care of races with VCPU_RUN, so
		 * kvm_gen_update_masterclock() can be cut down to locked
		 * pvclock_update_vm_gtod_copy().
		 */
		kvm_gen_update_masterclock(kvm);
4513
		now_ns = get_kvmclock_ns(kvm);
4514
		kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
4515
		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
4516 4517 4518 4519 4520 4521
		break;
	}
	case KVM_GET_CLOCK: {
		struct kvm_clock_data user_ns;
		u64 now_ns;

4522
		now_ns = get_kvmclock_ns(kvm);
4523
		user_ns.clock = now_ns;
4524
		user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
4525
		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
4526 4527 4528 4529 4530 4531 4532

		r = -EFAULT;
		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
			goto out;
		r = 0;
		break;
	}
4533 4534
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
4535

4536 4537 4538 4539 4540 4541
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
4542 4543 4544 4545 4546 4547
	case KVM_MEMORY_ENCRYPT_OP: {
		r = -ENOTTY;
		if (kvm_x86_ops->mem_enc_op)
			r = kvm_x86_ops->mem_enc_op(kvm, argp);
		break;
	}
4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571
	case KVM_MEMORY_ENCRYPT_REG_REGION: {
		struct kvm_enc_region region;

		r = -EFAULT;
		if (copy_from_user(&region, argp, sizeof(region)))
			goto out;

		r = -ENOTTY;
		if (kvm_x86_ops->mem_enc_reg_region)
			r = kvm_x86_ops->mem_enc_reg_region(kvm, &region);
		break;
	}
	case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
		struct kvm_enc_region region;

		r = -EFAULT;
		if (copy_from_user(&region, argp, sizeof(region)))
			goto out;

		r = -ENOTTY;
		if (kvm_x86_ops->mem_enc_unreg_region)
			r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region);
		break;
	}
4572 4573 4574 4575 4576 4577 4578 4579 4580
	case KVM_HYPERV_EVENTFD: {
		struct kvm_hyperv_eventfd hvevfd;

		r = -EFAULT;
		if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
			goto out;
		r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
		break;
	}
4581
	default:
4582
		r = -ENOTTY;
4583 4584 4585 4586 4587
	}
out:
	return r;
}

4588
static void kvm_init_msr_list(void)
4589 4590 4591 4592
{
	u32 dummy[2];
	unsigned i, j;

4593
	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
4594 4595
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
4596 4597 4598

		/*
		 * Even MSRs that are valid in the host may not be exposed
4599
		 * to the guests in some cases.
4600 4601 4602 4603 4604 4605
		 */
		switch (msrs_to_save[i]) {
		case MSR_IA32_BNDCFGS:
			if (!kvm_x86_ops->mpx_supported())
				continue;
			break;
4606 4607 4608 4609
		case MSR_TSC_AUX:
			if (!kvm_x86_ops->rdtscp_supported())
				continue;
			break;
4610 4611 4612 4613
		default:
			break;
		}

4614 4615 4616 4617 4618
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
4619 4620

	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
4621 4622
		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
			continue;
4623 4624 4625 4626 4627 4628

		if (j < i)
			emulated_msrs[j] = emulated_msrs[i];
		j++;
	}
	num_emulated_msrs = j;
4629 4630 4631 4632 4633

	for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
		struct kvm_msr_entry msr;

		msr.index = msr_based_features[i];
4634
		if (kvm_get_msr_feature(&msr))
4635 4636 4637 4638 4639 4640 4641
			continue;

		if (j < i)
			msr_based_features[j] = msr_based_features[i];
		j++;
	}
	num_msr_based_features = j;
4642 4643
}

4644 4645
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
			   const void *v)
4646
{
4647 4648 4649 4650 4651
	int handled = 0;
	int n;

	do {
		n = min(len, 8);
4652
		if (!(lapic_in_kernel(vcpu) &&
4653 4654
		      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
		    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
4655 4656 4657 4658 4659 4660
			break;
		handled += n;
		addr += n;
		len -= n;
		v += n;
	} while (len);
4661

4662
	return handled;
4663 4664
}

4665
static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
4666
{
4667 4668 4669 4670 4671
	int handled = 0;
	int n;

	do {
		n = min(len, 8);
4672
		if (!(lapic_in_kernel(vcpu) &&
4673 4674 4675
		      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
					 addr, n, v))
		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
4676
			break;
4677
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
4678 4679 4680 4681 4682
		handled += n;
		addr += n;
		len -= n;
		v += n;
	} while (len);
4683

4684
	return handled;
4685 4686
}

4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698
static void kvm_set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

4699 4700
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception)
4701 4702 4703 4704 4705 4706 4707
{
	gpa_t t_gpa;

	BUG_ON(!mmu_is_nested(vcpu));

	/* NPT walks are always user-walks */
	access |= PFERR_USER_MASK;
4708
	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
4709 4710 4711 4712

	return t_gpa;
}

4713 4714
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception)
4715 4716
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4717
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4718 4719
}

4720 4721
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
4722 4723 4724
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_FETCH_MASK;
4725
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4726 4727
}

4728 4729
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
4730 4731 4732
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_WRITE_MASK;
4733
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4734 4735 4736
}

/* uses this to access any guest's mapped memory without checking CPL */
4737 4738
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
4739
{
4740
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
4741 4742 4743 4744
}

static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u32 access,
4745
				      struct x86_exception *exception)
4746 4747
{
	void *data = val;
4748
	int r = X86EMUL_CONTINUE;
4749 4750

	while (bytes) {
4751
		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
4752
							    exception);
4753
		unsigned offset = addr & (PAGE_SIZE-1);
4754
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
4755 4756
		int ret;

4757
		if (gpa == UNMAPPED_GVA)
4758
			return X86EMUL_PROPAGATE_FAULT;
4759 4760
		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
					       offset, toread);
4761
		if (ret < 0) {
4762
			r = X86EMUL_IO_NEEDED;
4763 4764
			goto out;
		}
4765

4766 4767 4768
		bytes -= toread;
		data += toread;
		addr += toread;
4769
	}
4770 4771
out:
	return r;
4772
}
4773

4774
/* used for instruction fetching */
4775 4776
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val, unsigned int bytes,
4777
				struct x86_exception *exception)
4778
{
4779
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4780
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4781 4782
	unsigned offset;
	int ret;
4783

4784 4785 4786 4787 4788 4789 4790 4791 4792
	/* Inline kvm_read_guest_virt_helper for speed.  */
	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
						    exception);
	if (unlikely(gpa == UNMAPPED_GVA))
		return X86EMUL_PROPAGATE_FAULT;

	offset = addr & (PAGE_SIZE-1);
	if (WARN_ON(offset + bytes > PAGE_SIZE))
		bytes = (unsigned)PAGE_SIZE - offset;
4793 4794
	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
				       offset, bytes);
4795 4796 4797 4798
	if (unlikely(ret < 0))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
4799 4800
}

4801
int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
4802
			       gva_t addr, void *val, unsigned int bytes,
4803
			       struct x86_exception *exception)
4804 4805
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4806

4807
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
4808
					  exception);
4809
}
4810
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
4811

4812 4813
static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
			     gva_t addr, void *val, unsigned int bytes,
4814
			     struct x86_exception *exception, bool system)
4815
{
4816
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4817 4818 4819 4820 4821 4822
	u32 access = 0;

	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
		access |= PFERR_USER_MASK;

	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
4823 4824
}

4825 4826 4827 4828 4829 4830 4831 4832 4833
static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
		unsigned long addr, void *val, unsigned int bytes)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);

	return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
}

4834 4835 4836
static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u32 access,
				      struct x86_exception *exception)
4837 4838 4839 4840 4841
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
4842
		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
4843
							     access,
4844
							     exception);
4845 4846 4847 4848
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

4849
		if (gpa == UNMAPPED_GVA)
4850
			return X86EMUL_PROPAGATE_FAULT;
4851
		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
4852
		if (ret < 0) {
4853
			r = X86EMUL_IO_NEEDED;
4854 4855 4856 4857 4858 4859 4860 4861 4862 4863
			goto out;
		}

		bytes -= towrite;
		data += towrite;
		addr += towrite;
	}
out:
	return r;
}
4864 4865

static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
4866 4867
			      unsigned int bytes, struct x86_exception *exception,
			      bool system)
4868 4869
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4870 4871 4872 4873
	u32 access = PFERR_WRITE_MASK;

	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
		access |= PFERR_USER_MASK;
4874 4875

	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
4876
					   access, exception);
4877 4878 4879 4880 4881 4882 4883 4884
}

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
				unsigned int bytes, struct x86_exception *exception)
{
	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
					   PFERR_WRITE_MASK, exception);
}
N
Nadav Har'El 已提交
4885
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
4886

W
Wanpeng Li 已提交
4887 4888
int handle_ud(struct kvm_vcpu *vcpu)
{
4889
	int emul_type = EMULTYPE_TRAP_UD;
W
Wanpeng Li 已提交
4890
	enum emulation_result er;
4891 4892 4893 4894
	char sig[5]; /* ud2; .ascii "kvm" */
	struct x86_exception e;

	if (force_emulation_prefix &&
4895 4896
	    kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
				sig, sizeof(sig), &e) == 0 &&
4897 4898 4899 4900
	    memcmp(sig, "\xf\xbkvm", sizeof(sig)) == 0) {
		kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
		emul_type = 0;
	}
W
Wanpeng Li 已提交
4901

4902
	er = emulate_instruction(vcpu, emul_type);
W
Wanpeng Li 已提交
4903 4904 4905 4906 4907 4908 4909 4910
	if (er == EMULATE_USER_EXIT)
		return 0;
	if (er != EMULATE_DONE)
		kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}
EXPORT_SYMBOL_GPL(handle_ud);

4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925
static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			    gpa_t gpa, bool write)
{
	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		return 1;

	if (vcpu_match_mmio_gpa(vcpu, gpa)) {
		trace_vcpu_match_mmio(gva, gpa, write, true);
		return 1;
	}

	return 0;
}

4926 4927 4928 4929
static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
				gpa_t *gpa, struct x86_exception *exception,
				bool write)
{
4930 4931
	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
		| (write ? PFERR_WRITE_MASK : 0);
4932

4933 4934 4935 4936 4937
	/*
	 * currently PKRU is only applied to ept enabled guest so
	 * there is no pkey in EPT page table for L1 guest or EPT
	 * shadow page table for L2 guest.
	 */
4938
	if (vcpu_match_mmio_gva(vcpu, gva)
F
Feng Wu 已提交
4939
	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
4940
				 vcpu->arch.access, 0, access)) {
4941 4942
		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
					(gva & (PAGE_SIZE - 1));
X
Xiao Guangrong 已提交
4943
		trace_vcpu_match_mmio(gva, *gpa, write, false);
4944 4945 4946
		return 1;
	}

4947 4948 4949 4950 4951
	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);

	if (*gpa == UNMAPPED_GVA)
		return -1;

4952
	return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
4953 4954
}

4955
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
4956
			const void *val, int bytes)
4957 4958 4959
{
	int ret;

4960
	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
4961
	if (ret < 0)
4962
		return 0;
4963
	kvm_page_track_write(vcpu, gpa, val, bytes);
4964 4965 4966
	return 1;
}

4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982
struct read_write_emulator_ops {
	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
				  int bytes);
	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
				  void *val, int bytes);
	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
			       int bytes, void *val);
	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
				    void *val, int bytes);
	bool write;
};

static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
	if (vcpu->mmio_read_completed) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
4983
			       vcpu->mmio_fragments[0].gpa, val);
4984 4985 4986 4987 4988 4989 4990 4991 4992 4993
		vcpu->mmio_read_completed = 0;
		return 1;
	}

	return 0;
}

static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			void *val, int bytes)
{
4994
	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
4995 4996 4997 4998 4999 5000 5001 5002 5003 5004
}

static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			 void *val, int bytes)
{
	return emulator_write_phys(vcpu, gpa, val, bytes);
}

static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{
5005
	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
5006 5007 5008 5009 5010 5011
	return vcpu_mmio_write(vcpu, gpa, bytes, val);
}

static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			  void *val, int bytes)
{
5012
	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
5013 5014 5015 5016 5017 5018
	return X86EMUL_IO_NEEDED;
}

static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			   void *val, int bytes)
{
A
Avi Kivity 已提交
5019 5020
	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];

5021
	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
5022 5023 5024
	return X86EMUL_CONTINUE;
}

5025
static const struct read_write_emulator_ops read_emultor = {
5026 5027 5028 5029 5030 5031
	.read_write_prepare = read_prepare,
	.read_write_emulate = read_emulate,
	.read_write_mmio = vcpu_mmio_read,
	.read_write_exit_mmio = read_exit_mmio,
};

5032
static const struct read_write_emulator_ops write_emultor = {
5033 5034 5035 5036 5037 5038
	.read_write_emulate = write_emulate,
	.read_write_mmio = write_mmio,
	.read_write_exit_mmio = write_exit_mmio,
	.write = true,
};

5039 5040 5041 5042
static int emulator_read_write_onepage(unsigned long addr, void *val,
				       unsigned int bytes,
				       struct x86_exception *exception,
				       struct kvm_vcpu *vcpu,
5043
				       const struct read_write_emulator_ops *ops)
5044
{
5045 5046
	gpa_t gpa;
	int handled, ret;
5047
	bool write = ops->write;
A
Avi Kivity 已提交
5048
	struct kvm_mmio_fragment *frag;
5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;

	/*
	 * If the exit was due to a NPF we may already have a GPA.
	 * If the GPA is present, use it to avoid the GVA to GPA table walk.
	 * Note, this cannot be used on string operations since string
	 * operation using rep will only have the initial GPA from the NPF
	 * occurred.
	 */
	if (vcpu->arch.gpa_available &&
	    emulator_can_use_gpa(ctxt) &&
5060 5061 5062 5063 5064 5065 5066
	    (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) {
		gpa = vcpu->arch.gpa_val;
		ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
	} else {
		ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
		if (ret < 0)
			return X86EMUL_PROPAGATE_FAULT;
5067
	}
5068

5069
	if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
5070 5071 5072 5073 5074
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
5075
	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
5076
	if (handled == bytes)
5077 5078
		return X86EMUL_CONTINUE;

5079 5080 5081 5082
	gpa += handled;
	bytes -= handled;
	val += handled;

5083 5084 5085 5086 5087
	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
	frag->gpa = gpa;
	frag->data = val;
	frag->len = bytes;
A
Avi Kivity 已提交
5088
	return X86EMUL_CONTINUE;
5089 5090
}

5091 5092
static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
			unsigned long addr,
5093 5094
			void *val, unsigned int bytes,
			struct x86_exception *exception,
5095
			const struct read_write_emulator_ops *ops)
5096
{
5097
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
A
Avi Kivity 已提交
5098 5099 5100 5101 5102 5103 5104 5105
	gpa_t gpa;
	int rc;

	if (ops->read_write_prepare &&
		  ops->read_write_prepare(vcpu, val, bytes))
		return X86EMUL_CONTINUE;

	vcpu->mmio_nr_fragments = 0;
5106

5107 5108
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
A
Avi Kivity 已提交
5109
		int now;
5110 5111

		now = -addr & ~PAGE_MASK;
5112 5113 5114
		rc = emulator_read_write_onepage(addr, val, now, exception,
						 vcpu, ops);

5115 5116 5117
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
5118 5119
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			addr = (u32)addr;
5120 5121 5122
		val += now;
		bytes -= now;
	}
5123

A
Avi Kivity 已提交
5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136
	rc = emulator_read_write_onepage(addr, val, bytes, exception,
					 vcpu, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (!vcpu->mmio_nr_fragments)
		return rc;

	gpa = vcpu->mmio_fragments[0].gpa;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

5137
	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
A
Avi Kivity 已提交
5138 5139 5140 5141 5142
	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;
	vcpu->run->mmio.phys_addr = gpa;

	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154
}

static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
				  unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, val, bytes,
				   exception, &read_emultor);
}

5155
static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
5156 5157 5158 5159 5160 5161 5162
			    unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, (void *)val, bytes,
				   exception, &write_emultor);
5163 5164
}

5165 5166 5167 5168 5169 5170 5171
#define CMPXCHG_TYPE(t, ptr, old, new) \
	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))

#ifdef CONFIG_X86_64
#  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
#else
#  define CMPXCHG64(ptr, old, new) \
5172
	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
5173 5174
#endif

5175 5176
static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
				     unsigned long addr,
5177 5178 5179
				     const void *old,
				     const void *new,
				     unsigned int bytes,
5180
				     struct x86_exception *exception)
5181
{
5182
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5183 5184 5185 5186
	gpa_t gpa;
	struct page *page;
	char *kaddr;
	bool exchanged;
5187

5188 5189 5190
	/* guests cmpxchg8b have to be emulated atomically */
	if (bytes > 8 || (bytes & (bytes - 1)))
		goto emul_write;
5191

5192
	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
5193

5194 5195 5196
	if (gpa == UNMAPPED_GVA ||
	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto emul_write;
5197

5198 5199
	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
		goto emul_write;
5200

5201
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
5202
	if (is_error_page(page))
5203
		goto emul_write;
5204

5205
	kaddr = kmap_atomic(page);
5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221
	kaddr += offset_in_page(gpa);
	switch (bytes) {
	case 1:
		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
		break;
	case 2:
		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
		break;
	case 4:
		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
		break;
	case 8:
		exchanged = CMPXCHG64(kaddr, old, new);
		break;
	default:
		BUG();
5222
	}
5223
	kunmap_atomic(kaddr);
5224 5225 5226 5227 5228
	kvm_release_page_dirty(page);

	if (!exchanged)
		return X86EMUL_CMPXCHG_FAILED;

5229
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
5230
	kvm_page_track_write(vcpu, gpa, new, bytes);
5231 5232

	return X86EMUL_CONTINUE;
5233

5234
emul_write:
5235
	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
5236

5237
	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
5238 5239
}

5240 5241
static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
5242
	int r = 0, i;
5243

5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255
	for (i = 0; i < vcpu->arch.pio.count; i++) {
		if (vcpu->arch.pio.in)
			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
					    vcpu->arch.pio.size, pd);
		else
			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
					     vcpu->arch.pio.port, vcpu->arch.pio.size,
					     pd);
		if (r)
			break;
		pd += vcpu->arch.pio.size;
	}
5256 5257 5258
	return r;
}

5259 5260 5261
static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
			       unsigned short port, void *val,
			       unsigned int count, bool in)
5262 5263
{
	vcpu->arch.pio.port = port;
5264
	vcpu->arch.pio.in = in;
5265
	vcpu->arch.pio.count  = count;
5266 5267 5268
	vcpu->arch.pio.size = size;

	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
5269
		vcpu->arch.pio.count = 0;
5270 5271 5272 5273
		return 1;
	}

	vcpu->run->exit_reason = KVM_EXIT_IO;
5274
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
5275 5276 5277 5278 5279 5280 5281 5282
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;

	return 0;
}

5283 5284 5285
static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
				    int size, unsigned short port, void *val,
				    unsigned int count)
5286
{
5287
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5288
	int ret;
5289

5290 5291
	if (vcpu->arch.pio.count)
		goto data_avail;
5292

5293 5294
	memset(vcpu->arch.pio_data, 0, size * count);

5295 5296 5297 5298
	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
	if (ret) {
data_avail:
		memcpy(val, vcpu->arch.pio_data, size * count);
5299
		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
5300
		vcpu->arch.pio.count = 0;
5301 5302 5303 5304 5305 5306
		return 1;
	}

	return 0;
}

5307 5308 5309 5310 5311 5312 5313
static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
				     int size, unsigned short port,
				     const void *val, unsigned int count)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	memcpy(vcpu->arch.pio_data, val, size * count);
5314
	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
5315 5316 5317
	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
}

5318 5319 5320 5321 5322
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

5323
static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
5324
{
5325
	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
5326 5327
}

5328
static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
5329 5330 5331 5332 5333
{
	if (!need_emulate_wbinvd(vcpu))
		return X86EMUL_CONTINUE;

	if (kvm_x86_ops->has_wbinvd_exit()) {
5334 5335 5336
		int cpu = get_cpu();

		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
5337 5338
		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
				wbinvd_ipi, NULL, 1);
5339
		put_cpu();
5340
		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
5341 5342
	} else
		wbinvd();
5343 5344
	return X86EMUL_CONTINUE;
}
5345 5346 5347

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
5348 5349
	kvm_emulate_wbinvd_noskip(vcpu);
	return kvm_skip_emulated_instruction(vcpu);
5350
}
5351 5352
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);

5353 5354


5355 5356
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
5357
	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
5358 5359
}

5360 5361
static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long *dest)
5362
{
5363
	return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
5364 5365
}

5366 5367
static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long value)
5368
{
5369

5370
	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
5371 5372
}

5373
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
5374
{
5375
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
5376 5377
}

5378
static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
5379
{
5380
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5381 5382 5383 5384 5385 5386 5387 5388 5389 5390
	unsigned long value;

	switch (cr) {
	case 0:
		value = kvm_read_cr0(vcpu);
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
5391
		value = kvm_read_cr3(vcpu);
5392 5393 5394 5395 5396 5397 5398 5399
		break;
	case 4:
		value = kvm_read_cr4(vcpu);
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
5400
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
5401 5402 5403 5404 5405 5406
		return 0;
	}

	return value;
}

5407
static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
5408
{
5409
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int res = 0;

	switch (cr) {
	case 0:
		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		res = kvm_set_cr3(vcpu, val);
		break;
	case 4:
		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
		break;
	case 8:
		res = kvm_set_cr8(vcpu, val);
		break;
	default:
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
		res = -1;
	}

	return res;
}

static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{
	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
}

static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
}

static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
}

static unsigned long emulator_get_cached_segment_base(
	struct x86_emulate_ctxt *ctxt, int seg)
{
	return get_segment_base(emul_to_vcpu(ctxt), seg);
}

static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
				 struct desc_struct *desc, u32 *base3,
				 int seg)
{
	struct kvm_segment var;

	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
	*selector = var.selector;

	if (var.unusable) {
		memset(desc, 0, sizeof(*desc));
		if (base3)
			*base3 = 0;
		return false;
	}

	if (var.g)
		var.limit >>= 12;
	set_desc_limit(desc, var.limit);
	set_desc_base(desc, (unsigned long)var.base);
#ifdef CONFIG_X86_64
	if (base3)
		*base3 = var.base >> 32;
#endif
	desc->type = var.type;
	desc->s = var.s;
	desc->dpl = var.dpl;
	desc->p = var.present;
	desc->avl = var.avl;
	desc->l = var.l;
	desc->d = var.db;
	desc->g = var.g;

	return true;
}

static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
				 struct desc_struct *desc, u32 base3,
				 int seg)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	struct kvm_segment var;

	var.selector = selector;
	var.base = get_desc_base(desc);
#ifdef CONFIG_X86_64
	var.base |= ((u64)base3) << 32;
#endif
	var.limit = get_desc_limit(desc);
	if (desc->g)
		var.limit = (var.limit << 12) | 0xfff;
	var.type = desc->type;
	var.dpl = desc->dpl;
	var.db = desc->d;
	var.s = desc->s;
	var.l = desc->l;
	var.g = desc->g;
	var.avl = desc->avl;
	var.present = desc->p;
	var.unusable = !var.present;
	var.padding = 0;

	kvm_set_segment(vcpu, &var, seg);
	return;
}

static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 *pdata)
{
	struct msr_data msr;
	int r;

	msr.index = msr_index;
	msr.host_initiated = false;
	r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
	if (r)
		return r;

	*pdata = msr.data;
	return 0;
}

static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 data)
{
	struct msr_data msr;

	msr.data = data;
	msr.index = msr_index;
	msr.host_initiated = false;
	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
}

static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	return vcpu->arch.smbase;
}

static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	vcpu->arch.smbase = smbase;
}

static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
			      u32 pmc)
{
	return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc);
}

static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
			     u32 pmc, u64 *pdata)
{
	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
}

static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{
	emul_to_vcpu(ctxt)->arch.halt_request = 1;
}

static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
			      struct x86_instruction_info *info,
			      enum x86_intercept_stage stage)
{
	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
}

static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
			u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool check_limit)
{
	return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, check_limit);
}

static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
	return kvm_register_read(emul_to_vcpu(ctxt), reg);
}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
}

static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
{
	kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
}

static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
{
	return emul_to_vcpu(ctxt)->arch.hflags;
}

static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
{
	kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
}

static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smbase);
}

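/*
 * Callback table that connects the generic x86 instruction emulator to KVM:
 * each hook converts the emulator context back to a kvm_vcpu and forwards to
 * the common helpers or kvm_x86_ops entry points defined above.
 */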
static const struct x86_emulate_ops emulate_ops = {
	.read_gpr            = emulator_read_gpr,
	.write_gpr           = emulator_write_gpr,
	.read_std            = emulator_read_std,
	.write_std           = emulator_write_std,
	.read_phys           = kvm_read_guest_phys_system,
	.fetch               = kvm_fetch_guest_virt,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
	.invlpg              = emulator_invlpg,
	.pio_in_emulated     = emulator_pio_in_emulated,
	.pio_out_emulated    = emulator_pio_out_emulated,
	.get_segment         = emulator_get_segment,
	.set_segment         = emulator_set_segment,
	.get_cached_segment_base = emulator_get_cached_segment_base,
	.get_gdt             = emulator_get_gdt,
	.get_idt	     = emulator_get_idt,
	.set_gdt             = emulator_set_gdt,
	.set_idt	     = emulator_set_idt,
	.get_cr              = emulator_get_cr,
	.set_cr              = emulator_set_cr,
	.cpl                 = emulator_get_cpl,
	.get_dr              = emulator_get_dr,
	.set_dr              = emulator_set_dr,
	.get_smbase          = emulator_get_smbase,
	.set_smbase          = emulator_set_smbase,
	.set_msr             = emulator_set_msr,
	.get_msr             = emulator_get_msr,
	.check_pmc	     = emulator_check_pmc,
	.read_pmc            = emulator_read_pmc,
	.halt                = emulator_halt,
	.wbinvd              = emulator_wbinvd,
	.fix_hypercall       = emulator_fix_hypercall,
	.intercept           = emulator_intercept,
	.get_cpuid           = emulator_get_cpuid,
	.set_nmi_mask        = emulator_set_nmi_mask,
	.get_hflags          = emulator_get_hflags,
	.set_hflags          = emulator_set_hflags,
	.pre_leave_smm       = emulator_pre_leave_smm,
};

static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
	/*
	 * an sti; sti; sequence only disables interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss
	 */
	if (int_shadow & mask)
		mask = 0;
	if (unlikely(int_shadow || mask)) {
		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
		if (!mask)
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
}

static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	if (ctxt->exception.vector == PF_VECTOR)
		return kvm_propagate_fault(vcpu, &ctxt->exception);

	if (ctxt->exception.error_code_valid)
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
	else
		kvm_queue_exception(vcpu, ctxt->exception.vector);
	return false;
}

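/*
 * Seed the emulation context from current vCPU state: EFLAGS, RIP and the
 * execution mode (real, VM86, 16/32-bit protected or 64-bit) derived from
 * CR0, EFLAGS.VM and the CS descriptor.
 */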
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int cs_db, cs_l;

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	ctxt->eflags = kvm_get_rflags(vcpu);
	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;

	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
		     cs_db				? X86EMUL_MODE_PROT32 :
							  X86EMUL_MODE_PROT16;
	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);

	init_decode_cache(ctxt);
	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}

int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int ret;

	init_emulate_ctxt(vcpu);

	ctxt->op_bytes = 2;
	ctxt->ad_bytes = 2;
	ctxt->_eip = ctxt->eip + inc_eip;
	ret = emulate_int_real(ctxt, irq);

	if (ret != X86EMUL_CONTINUE)
		return EMULATE_FAIL;

	ctxt->eip = ctxt->_eip;
	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);

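/*
 * Emulation failed: count the failure, exit to userspace with an internal
 * error for CPL0 failures outside guest mode, and queue a #UD unless the
 * caller passed EMULTYPE_NO_UD_ON_FAIL.
 */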
static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{
	int r = EMULATE_DONE;

	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);

	if (emulation_type & EMULTYPE_NO_UD_ON_FAIL)
		return EMULATE_FAIL;

	if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		r = EMULATE_USER_EXIT;
	}

	kvm_queue_exception(vcpu, UD_VECTOR);

	return r;
}

static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
				  bool write_fault_to_shadow_pgtable,
				  int emulation_type)
{
	gpa_t gpa = cr2;
	kvm_pfn_t pfn;

	if (emulation_type & EMULTYPE_NO_REEXECUTE)
		return false;

	if (!vcpu->arch.mmu.direct_map) {
		/*
		 * Write permission should be allowed since only
		 * write access needs to be emulated.
		 */
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

		/*
		 * If the mapping is invalid in the guest, let the CPU retry
		 * it to generate a fault.
		 */
		if (gpa == UNMAPPED_GVA)
			return true;
	}

	/*
	 * Do not retry the unhandleable instruction if it faults on the
	 * readonly host memory, otherwise it will go into an infinite loop:
	 * retry instruction -> write #PF -> emulation fail -> retry
	 * instruction -> ...
	 */
	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the instruction failed on the error pfn, it can not be fixed,
	 * report the error to userspace.
	 */
	if (is_error_noslot_pfn(pfn))
		return false;

	kvm_release_pfn_clean(pfn);

	/* The instructions are well-emulated on direct mmu. */
	if (vcpu->arch.mmu.direct_map) {
		unsigned int indirect_shadow_pages;

		spin_lock(&vcpu->kvm->mmu_lock);
		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
		spin_unlock(&vcpu->kvm->mmu_lock);

		if (indirect_shadow_pages)
			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

		return true;
	}

	/*
	 * If emulation was due to an access to a shadowed page table,
	 * and it failed, try to unshadow the page and re-enter the
	 * guest to let the CPU execute the instruction.
	 */
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	/*
	 * If the access faults on its page table, it can not
	 * be fixed by unprotecting shadow page and it should
	 * be reported to userspace.
	 */
	return !write_fault_to_shadow_pgtable;
}

static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
			      unsigned long cr2,  int emulation_type)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;

	last_retry_eip = vcpu->arch.last_retry_eip;
	last_retry_addr = vcpu->arch.last_retry_addr;

	/*
	 * If the emulation is caused by #PF and it is non-page_table
	 * writing instruction, it means the VM-EXIT is caused by shadow
	 * page protected, we can zap the shadow page and retry this
	 * instruction directly.
	 *
	 * Note: if the guest uses a non-page-table modifying instruction
	 * on the PDE that points to the instruction, then we will unmap
	 * the instruction and go to an infinite loop. So, we cache the
	 * last retried eip and the last fault address, if we meet the eip
	 * and the address again, we can break out of the potential infinite
	 * loop.
	 */
	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;

	if (!(emulation_type & EMULTYPE_RETRY))
		return false;

	if (x86_page_table_writing_insn(ctxt))
		return false;

	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
		return false;

	vcpu->arch.last_retry_eip = ctxt->eip;
	vcpu->arch.last_retry_addr = cr2;

	if (!vcpu->arch.mmu.direct_map)
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	return true;
}

static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);

static void kvm_smm_changed(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
		/* This is a good place to trace that we are exiting SMM.  */
		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);

		/* Process a latched INIT or SMI, if any.  */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	kvm_mmu_reset_context(vcpu);
}

static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
{
	unsigned changed = vcpu->arch.hflags ^ emul_flags;

	vcpu->arch.hflags = emul_flags;

	if (changed & HF_SMM_MASK)
		kvm_smm_changed(vcpu);
}

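/*
 * Check the four hardware breakpoint slots described by dr7/db[] for one
 * that is enabled, matches the access type and matches @addr; returns the
 * corresponding DR6 trap bits (or 0 if nothing hit).
 */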
static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
				unsigned long *db)
{
	u32 dr6 = 0;
	int i;
	u32 enable, rwlen;

	enable = dr7;
	rwlen = dr7 >> 16;
	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
			dr6 |= (1 << i);
	return dr6;
}

static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
		kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		*r = EMULATE_USER_EXIT;
	} else {
		/*
		 * "Certain debug exceptions may clear bit 0-3.  The
		 * remaining contents of the DR6 register are never
		 * cleared by the processor".
		 */
		vcpu->arch.dr6 &= ~15;
		vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
		kvm_queue_exception(vcpu, DB_VECTOR);
	}
}

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
	int r = EMULATE_DONE;

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	/*
	 * rflags is the old, "raw" value of the flags.  The new value has
	 * not been saved yet.
	 *
	 * This is correct even for TF set by the guest, because "the
	 * processor will not generate this exception after the instruction
	 * that sets the TF flag".
	 */
	if (unlikely(rflags & X86_EFLAGS_TF))
		kvm_vcpu_do_singlestep(vcpu, &r);
	return r == EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);

static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
{
	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
		struct kvm_run *kvm_run = vcpu->run;
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					   vcpu->arch.guest_debug_dr7,
					   vcpu->arch.eff_db);

		if (dr6 != 0) {
			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
			kvm_run->debug.arch.pc = eip;
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = EMULATE_USER_EXIT;
			return true;
		}
	}

	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					   vcpu->arch.dr7,
					   vcpu->arch.db);

		if (dr6 != 0) {
			vcpu->arch.dr6 &= ~15;
			vcpu->arch.dr6 |= dr6 | DR6_RTM;
			kvm_queue_exception(vcpu, DB_VECTOR);
			*r = EMULATE_DONE;
			return true;
		}
	}

	return false;
}

static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->opcode_len) {
	case 1:
		switch (ctxt->b) {
		case 0xe4:	/* IN */
		case 0xe5:
		case 0xec:
		case 0xed:
		case 0xe6:	/* OUT */
		case 0xe7:
		case 0xee:
		case 0xef:
		case 0x6c:	/* INS */
		case 0x6d:
		case 0x6e:	/* OUTS */
		case 0x6f:
			return true;
		}
		break;
	case 2:
		switch (ctxt->b) {
		case 0x33:	/* RDPMC */
			return true;
		}
		break;
	}

	return false;
}

int x86_emulate_instruction(struct kvm_vcpu *vcpu,
			    unsigned long cr2,
			    int emulation_type,
			    void *insn,
			    int insn_len)
{
	int r;
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	bool writeback = true;
	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;

	/*
	 * Clear write_fault_to_shadow_pgtable here to ensure it is
	 * never reused.
	 */
	vcpu->arch.write_fault_to_shadow_pgtable = false;
	kvm_clear_exception_queue(vcpu);

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		init_emulate_ctxt(vcpu);

		/*
		 * We will reenter on the same instruction since
		 * we do not set complete_userspace_io.  This does not
		 * handle watchpoints yet, those would be handled in
		 * the emulate_ops.
		 */
		if (!(emulation_type & EMULTYPE_SKIP) &&
		    kvm_vcpu_check_breakpoint(vcpu, &r))
			return r;

		ctxt->interruptibility = 0;
		ctxt->have_exception = false;
		ctxt->exception.vector = -1;
		ctxt->perm_ok = false;

		ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;

		r = x86_decode_insn(ctxt, insn, insn_len);

		trace_kvm_emulate_insn_start(vcpu);
		++vcpu->stat.insn_emulation;
		if (r != EMULATION_OK)  {
			if (emulation_type & EMULTYPE_TRAP_UD)
				return EMULATE_FAIL;
			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
						emulation_type))
				return EMULATE_DONE;
			if (ctxt->have_exception && inject_emulated_exception(vcpu))
				return EMULATE_DONE;
			if (emulation_type & EMULTYPE_SKIP)
				return EMULATE_FAIL;
			return handle_emulation_failure(vcpu, emulation_type);
		}
	}

	if ((emulation_type & EMULTYPE_VMWARE) &&
	    !is_vmware_backdoor_opcode(ctxt))
		return EMULATE_FAIL;

	if (emulation_type & EMULTYPE_SKIP) {
		kvm_rip_write(vcpu, ctxt->_eip);
		if (ctxt->eflags & X86_EFLAGS_RF)
			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
		return EMULATE_DONE;
	}

	if (retry_instruction(ctxt, cr2, emulation_type))
		return EMULATE_DONE;

	/* this is needed for the vmware backdoor interface to work since it
	   changes register values during the IO operation */
	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
		emulator_invalidate_register_cache(ctxt);
	}

restart:
	/* Save the faulting GPA (cr2) in the address field */
	ctxt->exception.address = cr2;

	r = x86_emulate_insn(ctxt);

	if (r == EMULATION_INTERCEPTED)
		return EMULATE_DONE;

	if (r == EMULATION_FAILED) {
		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
					emulation_type))
			return EMULATE_DONE;

		return handle_emulation_failure(vcpu, emulation_type);
	}

	if (ctxt->have_exception) {
		r = EMULATE_DONE;
		if (inject_emulated_exception(vcpu))
			return r;
	} else if (vcpu->arch.pio.count) {
		if (!vcpu->arch.pio.in) {
			/* FIXME: return into emulator if single-stepping.  */
			vcpu->arch.pio.count = 0;
		} else {
			writeback = false;
			vcpu->arch.complete_userspace_io = complete_emulated_pio;
		}
		r = EMULATE_USER_EXIT;
	} else if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			writeback = false;
		r = EMULATE_USER_EXIT;
		vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	} else if (r == EMULATION_RESTART)
		goto restart;
	else
		r = EMULATE_DONE;

	if (writeback) {
		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
		toggle_interruptibility(vcpu, ctxt->interruptibility);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
		kvm_rip_write(vcpu, ctxt->eip);
		if (r == EMULATE_DONE &&
		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
			kvm_vcpu_do_singlestep(vcpu, &r);
		if (!ctxt->have_exception ||
		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
			__kvm_set_rflags(vcpu, ctxt->eflags);

		/*
		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
		 * do nothing, and it will be requested again as soon as
		 * the shadow expires.  But we still need to check here,
		 * because POPF has no interrupt shadow.
		 */
		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else
		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;

	return r;
}
EXPORT_SYMBOL_GPL(x86_emulate_instruction);

static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
			    unsigned short port)
{
	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
					    size, port, &val, 1);
	/* do not return to emulator after return from userspace */
	vcpu->arch.pio.count = 0;
	return ret;
}

static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
{
	unsigned long val;

	/* We should only ever be called with arch.pio.count equal to 1 */
	BUG_ON(vcpu->arch.pio.count != 1);

	/* For size less than 4 we merge, else we zero extend */
	val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
					: 0;

	/*
	 * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
	 * the copy and tracing
	 */
	emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
				 vcpu->arch.pio.port, &val, 1);
	kvm_register_write(vcpu, VCPU_REGS_RAX, val);

	return 1;
}

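/*
 * Fast path for single IN accesses: if the port read cannot be completed in
 * the kernel, register complete_fast_pio_in() so that the value is merged
 * into RAX after userspace has emulated the access.
 */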
static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
			   unsigned short port)
{
	unsigned long val;
	int ret;

	/* For size less than 4 we merge, else we zero extend */
	val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;

	ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
				       &val, 1);
	if (ret) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, val);
		return ret;
	}

	vcpu->arch.complete_userspace_io = complete_fast_pio_in;

	return 0;
}

int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
{
	int ret = kvm_skip_emulated_instruction(vcpu);

	/*
	 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
	 * KVM_EXIT_DEBUG here.
	 */
	if (in)
		return kvm_fast_pio_in(vcpu, size, port) && ret;
	else
		return kvm_fast_pio_out(vcpu, size, port) && ret;
}
EXPORT_SYMBOL_GPL(kvm_fast_pio);

static int kvmclock_cpu_down_prep(unsigned int cpu)
{
	__this_cpu_write(cpu_tsc_khz, 0);
	return 0;
}

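/*
 * Refresh this CPU's cached TSC frequency: use the cpufreq notifier data when
 * available, fall back to cpufreq_quick_get() on hosts without a constant
 * TSC, and finally to the boot-time tsc_khz value.
 */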
static void tsc_khz_changed(void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long khz = 0;

	if (data)
		khz = freq->new;
	else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		khz = cpufreq_quick_get(raw_smp_processor_id());
	if (!khz)
		khz = tsc_khz;
	__this_cpu_write(cpu_tsc_khz, khz);
}

#ifdef CONFIG_X86_64
static void kvm_hyperv_tsc_notifier(void)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int cpu;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_make_mclock_inprogress_request(kvm);

	hyperv_stop_tsc_emulation();

	/* TSC frequency always matches when on Hyper-V */
	for_each_present_cpu(cpu)
		per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
	kvm_max_guest_tsc_khz = tsc_khz;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		struct kvm_arch *ka = &kvm->arch;

		spin_lock(&ka->pvclock_gtod_sync_lock);

		pvclock_update_vm_gtod_copy(kvm);

		kvm_for_each_vcpu(cpu, vcpu, kvm)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

		kvm_for_each_vcpu(cpu, vcpu, kvm)
			kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);

		spin_unlock(&ka->pvclock_gtod_sync_lock);
	}
	spin_unlock(&kvm_lock);
}
#endif

static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i, send_ipi = 0;

	/*
	 * We allow guests to temporarily run on slowing clocks,
	 * provided we notify them after, or to run on accelerating
	 * clocks, provided we notify them before.  Thus time never
	 * goes backwards.
	 *
	 * However, we have a problem.  We can't atomically update
	 * the frequency of a given CPU from this function; it is
	 * merely a notifier, which can be called from any CPU.
	 * Changing the TSC frequency at arbitrary points in time
	 * requires a recomputation of local variables related to
	 * the TSC for each VCPU.  We must flag these local variables
	 * to be updated and be sure the update takes place with the
	 * new frequency before any guests proceed.
	 *
	 * Unfortunately, the combination of hotplug CPU and frequency
	 * change creates an intractable locking scenario; the order
	 * of when these callouts happen is undefined with respect to
	 * CPU hotplug, and they can race with each other.  As such,
	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
	 * undefined; you can actually have a CPU frequency change take
	 * place in between the computation of X and the setting of the
	 * variable.  To protect against this problem, all updates of
	 * the per_cpu tsc_khz variable are done in an interrupt
	 * protected IPI, and all callers wishing to update the value
	 * must wait for a synchronous IPI to complete (which is trivial
	 * if the caller is on the CPU already).  This establishes the
	 * necessary total order on variable updates.
	 *
	 * Note that because a guest time update may take place
	 * anytime after the setting of the VCPU's request bit, the
	 * correct TSC value must be set before the request.  However,
	 * to ensure the update actually makes it to any guest which
	 * starts running in hardware virtualization between the set
	 * and the acquisition of the spinlock, we must also ping the
	 * CPU after setting the request bit.
	 *
	 */

	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
		return 0;
	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
		return 0;

	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->cpu != freq->cpu)
				continue;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
			if (vcpu->cpu != smp_processor_id())
				send_ipi = 1;
		}
	}
	spin_unlock(&kvm_lock);

	if (freq->old < freq->new && send_ipi) {
		/*
		 * We upscale the frequency.  We must make sure the guest
		 * doesn't see old kvmclock values while running with
		 * the new frequency, otherwise we risk the guest seeing
		 * time go backwards.
		 *
		 * In case we update the frequency for another cpu
		 * (which might be in guest context) send an interrupt
		 * to kick the cpu out of guest context.  Next time
		 * guest context is entered kvmclock will be updated,
		 * so the guest will not see stale values.
		 */
		smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
	}
	return 0;
}

static struct notifier_block kvmclock_cpufreq_notifier_block = {
	.notifier_call  = kvmclock_cpufreq_notifier
};

static int kvmclock_cpu_online(unsigned int cpu)
{
	tsc_khz_changed(NULL);
	return 0;
}

static void kvm_timer_init(void)
{
	max_tsc_khz = tsc_khz;

	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
		struct cpufreq_policy policy;
		int cpu;

		memset(&policy, 0, sizeof(policy));
		cpu = get_cpu();
		cpufreq_get_policy(&policy, cpu);
		if (policy.cpuinfo.max_freq)
			max_tsc_khz = policy.cpuinfo.max_freq;
		put_cpu();
#endif
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}
	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);

	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
}

DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
EXPORT_PER_CPU_SYMBOL_GPL(current_vcpu);

int kvm_is_in_guest(void)
{
	return __this_cpu_read(current_vcpu) != NULL;
}

static int kvm_is_user_mode(void)
{
	int user_mode = 3;

	if (__this_cpu_read(current_vcpu))
		user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));

	return user_mode != 0;
}

static unsigned long kvm_get_guest_ip(void)
{
	unsigned long ip = 0;

	if (__this_cpu_read(current_vcpu))
		ip = kvm_rip_read(__this_cpu_read(current_vcpu));

	return ip;
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.is_in_guest		= kvm_is_in_guest,
	.is_user_mode		= kvm_is_user_mode,
	.get_guest_ip		= kvm_get_guest_ip,
};

static void kvm_set_mmio_spte_mask(void)
{
	u64 mask;
	int maxphyaddr = boot_cpu_data.x86_phys_bits;

	/*
	 * Set the reserved bits and the present bit of a paging-structure
	 * entry to generate a page fault with PFER.RSV = 1.
	 */
	 /* Mask the reserved physical address bits. */
	mask = rsvd_bits(maxphyaddr, 51);

	/* Set the present bit. */
	mask |= 1ull;

#ifdef CONFIG_X86_64
	/*
	 * If reserved bit is not supported, clear the present bit to disable
	 * mmio page fault.
	 */
	if (maxphyaddr == 52)
		mask &= ~1ull;
#endif

	kvm_mmu_set_mmio_spte_mask(mask, mask);
}

#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
	struct kvm *kvm;

	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
	atomic_set(&kvm_guest_has_master_clock, 0);
	spin_unlock(&kvm_lock);
}

static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);

/*
 * Notification about pvclock gtod data update.
 */
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
			       void *priv)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	struct timekeeper *tk = priv;

	update_pvclock_gtod(tk);

	/* disable master clock if host does not trust, or does not
	 * use, TSC based clocksource.
	 */
	if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
	    atomic_read(&kvm_guest_has_master_clock) != 0)
		queue_work(system_long_wq, &pvclock_gtod_work);

	return 0;
}

static struct notifier_block pvclock_gtod_notifier = {
	.notifier_call = pvclock_gtod_notify,
};
#endif

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_x86_ops *ops = opaque;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		r = -EEXIST;
		goto out;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	r = -ENOMEM;
	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
	if (!shared_msrs) {
		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
		goto out;
	}

	r = kvm_mmu_module_init();
	if (r)
		goto out_free_percpu;

	kvm_set_mmio_spte_mask();

	kvm_x86_ops = ops;

	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0,
			PT_PRESENT_MASK, 0, sme_me_mask);
	kvm_timer_init();

	perf_register_guest_info_callbacks(&kvm_guest_cbs);

	if (boot_cpu_has(X86_FEATURE_XSAVE))
		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	kvm_lapic_init();
#ifdef CONFIG_X86_64
	pvclock_gtod_register_notifier(&pvclock_gtod_notifier);

	if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
		set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
#endif

	return 0;

out_free_percpu:
	free_percpu(shared_msrs);
out:
	return r;
}

void kvm_arch_exit(void)
{
#ifdef CONFIG_X86_64
	if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
		clear_hv_tscchange_cb();
#endif
	kvm_lapic_exit();
	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);

	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
#ifdef CONFIG_X86_64
	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
#endif
	kvm_x86_ops = NULL;
	kvm_mmu_module_exit();
	free_percpu(shared_msrs);
}

int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (lapic_in_kernel(vcpu)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
6636 6637 6638 6639
EXPORT_SYMBOL_GPL(kvm_vcpu_halt);

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	int ret = kvm_skip_emulated_instruction(vcpu);
	/*
	 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
	 * KVM_EXIT_DEBUG here.
	 */
	return kvm_vcpu_halt(vcpu) && ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

#ifdef CONFIG_X86_64
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
			        unsigned long clock_type)
{
	struct kvm_clock_pairing clock_pairing;
	struct timespec64 ts;
	u64 cycle;
	int ret;

	if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
		return -KVM_EOPNOTSUPP;

	if (kvm_get_walltime_and_clockread(&ts, &cycle) == false)
		return -KVM_EOPNOTSUPP;

	clock_pairing.sec = ts.tv_sec;
	clock_pairing.nsec = ts.tv_nsec;
	clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
	clock_pairing.flags = 0;

	ret = 0;
	if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
			    sizeof(struct kvm_clock_pairing)))
		ret = -KVM_EFAULT;

	return ret;
}
#endif

/*
 * kvm_pv_kick_cpu_op:  Kick a vcpu.
 *
 * @apicid - apicid of vcpu to be kicked.
 */
static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
{
	struct kvm_lapic_irq lapic_irq;

	lapic_irq.shorthand = 0;
	lapic_irq.dest_mode = 0;
	lapic_irq.level = 0;
	lapic_irq.dest_id = apicid;
	lapic_irq.msi_redir_hint = false;

	lapic_irq.delivery_mode = APIC_DM_REMRD;
	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
}

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
{
	vcpu->arch.apicv_active = false;
	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
}

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
	int op_64_bit;

	if (kvm_hv_hypercall_enabled(vcpu->kvm))
		return kvm_hv_hypercall(vcpu);

	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);

	trace_kvm_hypercall(nr, a0, a1, a2, a3);

	op_64_bit = is_64_bit_mode(vcpu);
	if (!op_64_bit) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
		ret = -KVM_EPERM;
		goto out;
	}

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_KICK_CPU:
		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
		ret = 0;
		break;
#ifdef CONFIG_X86_64
	case KVM_HC_CLOCK_PAIRING:
		ret = kvm_pv_clock_pairing(vcpu, a0, a1);
		break;
#endif
	default:
		ret = -KVM_ENOSYS;
		break;
	}
out:
	if (!op_64_bit)
		ret = (u32)ret;
	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);

	++vcpu->stat.hypercalls;
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	char instruction[3];
	unsigned long rip = kvm_rip_read(vcpu);

	kvm_x86_ops->patch_hypercall(vcpu, instruction);

	return emulator_write_emulated(ctxt, rip, instruction, 3,
		&ctxt->exception);
}

static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
	return vcpu->run->request_interrupt_window &&
		likely(!pic_in_kernel(vcpu->kvm));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	kvm_run->ready_for_interrupt_injection =
		pic_in_kernel(vcpu->kvm) ||
		kvm_vcpu_ready_for_interrupt_injection(vcpu);
}

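/*
 * Recompute the CR8/TPR intercept from the highest pending interrupt and the
 * current TPR; skipped when the local APIC is emulated in userspace or when
 * APICv handles TPR virtualization.
 */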
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!kvm_x86_ops->update_cr8_intercept)
		return;

6798
	if (!lapic_in_kernel(vcpu))
6799 6800
		return;

6801 6802 6803
	if (vcpu->arch.apicv_active)
		return;

6804 6805 6806 6807
	if (!vcpu->arch.apic->vapic_addr)
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	else
		max_irr = -1;

	if (max_irr != -1)
		max_irr >>= 4;

	tpr = kvm_lapic_get_cr8(vcpu);

	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}

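/*
 * Decide which event, if any, to inject before the next VM entry.  Events
 * that were already injected are re-queued first, then nested events are
 * given a chance to trigger an exit, and finally pending exceptions, SMIs,
 * NMIs and maskable interrupts are delivered in that order.
 */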
static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
{
	int r;

	/* try to reinject previous events if any */

	if (vcpu->arch.exception.injected)
		kvm_x86_ops->queue_exception(vcpu);
	/*
	 * Do not inject an NMI or interrupt if there is a pending
	 * exception.  Exceptions and interrupts are recognized at
	 * instruction boundaries, i.e. the start of an instruction.
	 * Trap-like exceptions, e.g. #DB, have higher priority than
	 * NMIs and interrupts, i.e. traps are recognized before an
	 * NMI/interrupt that's pending on the same instruction.
	 * Fault-like exceptions, e.g. #GP and #PF, are the lowest
	 * priority, but are only generated (pended) during instruction
	 * execution, i.e. a pending fault-like exception means the
	 * fault occurred on the *previous* instruction and must be
	 * serviced prior to recognizing any new events in order to
	 * fully complete the previous instruction.
	 */
	else if (!vcpu->arch.exception.pending) {
		if (vcpu->arch.nmi_injected)
			kvm_x86_ops->set_nmi(vcpu);
		else if (vcpu->arch.interrupt.injected)
			kvm_x86_ops->set_irq(vcpu);
	}

	/*
	 * Call check_nested_events() even if we reinjected a previous event
	 * in order for caller to determine if it should require immediate-exit
	 * from L2 to L1 due to pending L1 events which require exit
	 * from L2 to L1.
	 */
	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
		if (r != 0)
			return r;
	}

	/* try to inject new event if pending */
	if (vcpu->arch.exception.pending) {
		trace_kvm_inj_exception(vcpu->arch.exception.nr,
					vcpu->arch.exception.has_error_code,
					vcpu->arch.exception.error_code);

		WARN_ON_ONCE(vcpu->arch.exception.injected);
		vcpu->arch.exception.pending = false;
		vcpu->arch.exception.injected = true;

		if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
					     X86_EFLAGS_RF);

		if (vcpu->arch.exception.nr == DB_VECTOR &&
		    (vcpu->arch.dr7 & DR7_GD)) {
			vcpu->arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(vcpu);
		}

		kvm_x86_ops->queue_exception(vcpu);
	}

	/* Don't consider new event if we re-injected an event */
	if (kvm_event_needs_reinjection(vcpu))
		return 0;

	if (vcpu->arch.smi_pending && !is_smm(vcpu) &&
	    kvm_x86_ops->smi_allowed(vcpu)) {
		vcpu->arch.smi_pending = false;
		++vcpu->arch.smi_count;
		enter_smm(vcpu);
	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
		--vcpu->arch.nmi_pending;
		vcpu->arch.nmi_injected = true;
		kvm_x86_ops->set_nmi(vcpu);
	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
		/*
		 * Because interrupts can be injected asynchronously, we are
		 * calling check_nested_events again here to avoid a race condition.
		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
		 * proposal and current concerns.  Perhaps we should be setting
		 * KVM_REQ_EVENT only on certain events and not unconditionally?
		 */
		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
			if (r != 0)
				return r;
		}
		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
					    false);
			kvm_x86_ops->set_irq(vcpu);
		}
	}

	return 0;
}

static void process_nmi(struct kvm_vcpu *vcpu)
{
	unsigned limit = 2;

	/*
	 * x86 is limited to one NMI running, and one NMI pending after it.
	 * If an NMI is already in progress, limit further NMIs to just one.
	 * Otherwise, allow two (and we'll inject the first one immediately).
	 */
	if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
		limit = 1;

	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}

static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
{
	u32 flags = 0;
	flags |= seg->g       << 23;
	flags |= seg->db      << 22;
	flags |= seg->l       << 21;
	flags |= seg->avl     << 20;
	flags |= seg->present << 15;
	flags |= seg->dpl     << 13;
	flags |= seg->s       << 12;
	flags |= seg->type    << 8;
	return flags;
}

static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
{
	struct kvm_segment seg;
	int offset;

	kvm_get_segment(vcpu, &seg, n);
	put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	put_smstate(u32, buf, offset + 8, seg.base);
	put_smstate(u32, buf, offset + 4, seg.limit);
	put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
}

#ifdef CONFIG_X86_64
static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
{
	struct kvm_segment seg;
	int offset;
	u16 flags;

	kvm_get_segment(vcpu, &seg, n);
	offset = 0x7e00 + n * 16;

6976
	flags = enter_smm_get_segment_flags(&seg) >> 8;
6977 6978 6979 6980 6981
	put_smstate(u16, buf, offset, seg.selector);
	put_smstate(u16, buf, offset + 2, flags);
	put_smstate(u32, buf, offset + 4, seg.limit);
	put_smstate(u64, buf, offset + 8, seg.base);
}
#endif

static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
{
	struct desc_ptr dt;
	struct kvm_segment seg;
	unsigned long val;
	int i;

	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
	put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
	put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));

	for (i = 0; i < 8; i++)
		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));

	kvm_get_dr(vcpu, 6, &val);
	put_smstate(u32, buf, 0x7fcc, (u32)val);
	kvm_get_dr(vcpu, 7, &val);
	put_smstate(u32, buf, 0x7fc8, (u32)val);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
	put_smstate(u32, buf, 0x7fc4, seg.selector);
	put_smstate(u32, buf, 0x7f64, seg.base);
	put_smstate(u32, buf, 0x7f60, seg.limit);
7008
	put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
7009 7010 7011 7012 7013

	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
	put_smstate(u32, buf, 0x7fc0, seg.selector);
	put_smstate(u32, buf, 0x7f80, seg.base);
	put_smstate(u32, buf, 0x7f7c, seg.limit);
7014
	put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
7015 7016 7017 7018 7019 7020 7021 7022 7023 7024

	kvm_x86_ops->get_gdt(vcpu, &dt);
	put_smstate(u32, buf, 0x7f74, dt.address);
	put_smstate(u32, buf, 0x7f70, dt.size);

	kvm_x86_ops->get_idt(vcpu, &dt);
	put_smstate(u32, buf, 0x7f58, dt.address);
	put_smstate(u32, buf, 0x7f54, dt.size);

	for (i = 0; i < 6; i++)
7025
		enter_smm_save_seg_32(vcpu, buf, i);
7026 7027 7028 7029 7030 7031 7032 7033

	put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));

	/* revision id */
	put_smstate(u32, buf, 0x7efc, 0x00020000);
	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
}

static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
{
#ifdef CONFIG_X86_64
	struct desc_ptr dt;
	struct kvm_segment seg;
	unsigned long val;
	int i;

	for (i = 0; i < 16; i++)
		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));

	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));

	kvm_get_dr(vcpu, 6, &val);
	put_smstate(u64, buf, 0x7f68, val);
	kvm_get_dr(vcpu, 7, &val);
	put_smstate(u64, buf, 0x7f60, val);

	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));

	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);

	/* revision id */
	put_smstate(u32, buf, 0x7efc, 0x00020064);

	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
	put_smstate(u16, buf, 0x7e90, seg.selector);
7066
	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
7067 7068 7069 7070 7071 7072 7073 7074 7075
	put_smstate(u32, buf, 0x7e94, seg.limit);
	put_smstate(u64, buf, 0x7e98, seg.base);

	kvm_x86_ops->get_idt(vcpu, &dt);
	put_smstate(u32, buf, 0x7e84, dt.size);
	put_smstate(u64, buf, 0x7e88, dt.address);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
	put_smstate(u16, buf, 0x7e70, seg.selector);
7076
	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
7077 7078 7079 7080 7081 7082 7083 7084
	put_smstate(u32, buf, 0x7e74, seg.limit);
	put_smstate(u64, buf, 0x7e78, seg.base);

	kvm_x86_ops->get_gdt(vcpu, &dt);
	put_smstate(u32, buf, 0x7e64, dt.size);
	put_smstate(u64, buf, 0x7e68, dt.address);

	for (i = 0; i < 6; i++)
7085
		enter_smm_save_seg_64(vcpu, buf, i);
7086 7087 7088 7089 7090
#else
	WARN_ON_ONCE(1);
#endif
}

static void enter_smm(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs, ds;
	struct desc_ptr dt;
	char buf[512];
	u32 cr0;

	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
	memset(buf, 0, 512);
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		enter_smm_save_state_64(vcpu, buf);
	else
		enter_smm_save_state_32(vcpu, buf);

	/*
	 * Give pre_enter_smm() a chance to make ISA-specific changes to the
	 * vCPU state (e.g. leave guest mode) after we've saved the state into
	 * the SMM state-save area.
	 */
	kvm_x86_ops->pre_enter_smm(vcpu, buf);

	vcpu->arch.hflags |= HF_SMM_MASK;
	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));

	if (kvm_x86_ops->get_nmi_mask(vcpu))
		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
	else
		kvm_x86_ops->set_nmi_mask(vcpu, true);

	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
	kvm_rip_write(vcpu, 0x8000);

	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_x86_ops->set_cr4(vcpu, 0);

	/* Undocumented: IDT limit is set to zero on entry to SMM.  */
	dt.address = dt.size = 0;
	kvm_x86_ops->set_idt(vcpu, &dt);

	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);

	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
	cs.base = vcpu->arch.smbase;

	ds.selector = 0;
	ds.base = 0;

	cs.limit    = ds.limit = 0xffffffff;
	cs.type     = ds.type = 0x3;
	cs.dpl      = ds.dpl = 0;
	cs.db       = ds.db = 0;
	cs.s        = ds.s = 1;
	cs.l        = ds.l = 0;
	cs.g        = ds.g = 1;
	cs.avl      = ds.avl = 0;
	cs.present  = ds.present = 1;
	cs.unusable = ds.unusable = 0;
	cs.padding  = ds.padding = 0;

	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);

	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		kvm_x86_ops->set_efer(vcpu, 0);

	kvm_update_cpuid(vcpu);
	kvm_mmu_reset_context(vcpu);
P

7167
static void process_smi(struct kvm_vcpu *vcpu)
7168 7169 7170 7171 7172
{
	vcpu->arch.smi_pending = true;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

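/*
 * Rebuild the set of vectors whose EOIs must be intercepted: scan the
 * userspace routing table when the irqchip is split, otherwise scan the
 * in-kernel IOAPIC (after syncing PIR when APICv is active).
 */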
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return;

	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);

	if (irqchip_split(vcpu->kvm))
7186
		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
7187
	else {
7188
		if (vcpu->arch.apicv_active)
7189
			kvm_x86_ops->sync_pir_to_irr(vcpu);
7190
		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
7191
	}
7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205

	if (is_guest_mode(vcpu))
		vcpu->arch.load_eoi_exitmap_pending = true;
	else
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
}

static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{
	u64 eoi_exit_bitmap[4];

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return;

7206 7207 7208
	bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
		  vcpu_to_synic(vcpu)->vec_bitmap, 256);
	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
7209 7210
}

7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
		unsigned long start, unsigned long end)
{
	unsigned long apic_address;

	/*
	 * The physical address of apic access page is stored in the VMCS.
	 * Update it when it becomes invalid.
	 */
	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (start <= apic_address && apic_address < end)
		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
}

7225 7226
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
7227 7228
	struct page *page = NULL;

7229
	if (!lapic_in_kernel(vcpu))
7230 7231
		return;

7232 7233 7234
	if (!kvm_x86_ops->set_apic_access_page_addr)
		return;

7235
	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
7236 7237
	if (is_error_page(page))
		return;
7238 7239 7240 7241 7242 7243 7244
	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));

	/*
	 * Do not pin apic access page in memory, the MMU notifier
	 * will call us again if it is migrated or swapped out.
	 */
	put_page(page);
7245 7246 7247
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);

7248
/*
7249
 * Returns 1 to let vcpu_run() continue the guest execution loop without
7250 7251 7252
 * exiting to the userspace.  Otherwise, the value will be returned to the
 * userspace.
 */
A
Avi Kivity 已提交
7253
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
7254 7255
{
	int r;
7256 7257 7258 7259
	bool req_int_win =
		dm_request_for_irq_injection(vcpu) &&
		kvm_cpu_accept_dm_intr(vcpu);

7260
	bool req_immediate_exit = false;
7261

R
Radim Krčmář 已提交
7262
	if (kvm_request_pending(vcpu)) {
7263 7264
		if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
			kvm_x86_ops->get_vmcs12_pages(vcpu);
7265
		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
7266
			kvm_mmu_unload(vcpu);
7267
		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
M
Marcelo Tosatti 已提交
7268
			__kvm_migrate_timers(vcpu);
7269 7270
		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
			kvm_gen_update_masterclock(vcpu->kvm);
7271 7272
		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
			kvm_gen_kvmclock_update(vcpu);
Z
Zachary Amsden 已提交
7273 7274
		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
			r = kvm_guest_time_update(vcpu);
7275 7276 7277
			if (unlikely(r))
				goto out;
		}
7278
		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
7279
			kvm_mmu_sync_roots(vcpu);
7280
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
7281
			kvm_vcpu_flush_tlb(vcpu, true);
7282
		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
A
Avi Kivity 已提交
7283
			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
A
Avi Kivity 已提交
7284 7285 7286
			r = 0;
			goto out;
		}
7287
		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
A
Avi Kivity 已提交
7288
			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
7289
			vcpu->mmio_needed = 0;
J
Joerg Roedel 已提交
7290 7291 7292
			r = 0;
			goto out;
		}
7293 7294 7295 7296 7297 7298
		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
			/* Page is swapped out. Do synthetic halt */
			vcpu->arch.apf.halted = true;
			r = 1;
			goto out;
		}
G
Glauber Costa 已提交
7299 7300
		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
			record_steal_time(vcpu);
P
Paolo Bonzini 已提交
7301 7302
		if (kvm_check_request(KVM_REQ_SMI, vcpu))
			process_smi(vcpu);
A
Avi Kivity 已提交
7303 7304
		if (kvm_check_request(KVM_REQ_NMI, vcpu))
			process_nmi(vcpu);
7305
		if (kvm_check_request(KVM_REQ_PMU, vcpu))
7306
			kvm_pmu_handle_event(vcpu);
7307
		if (kvm_check_request(KVM_REQ_PMI, vcpu))
7308
			kvm_pmu_deliver_pmi(vcpu);
7309 7310 7311
		if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
			BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
			if (test_bit(vcpu->arch.pending_ioapic_eoi,
7312
				     vcpu->arch.ioapic_handled_vectors)) {
7313 7314 7315 7316 7317 7318 7319
				vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
				vcpu->run->eoi.vector =
						vcpu->arch.pending_ioapic_eoi;
				r = 0;
				goto out;
			}
		}
7320 7321
		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
			vcpu_scan_ioapic(vcpu);
7322 7323
		if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
			vcpu_load_eoi_exitmap(vcpu);
7324 7325
		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
			kvm_vcpu_reload_apic_access_page(vcpu);
7326 7327 7328 7329 7330 7331
		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
			r = 0;
			goto out;
		}
7332 7333 7334 7335 7336 7337
		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
			r = 0;
			goto out;
		}
A
Andrey Smetanin 已提交
7338 7339 7340 7341 7342 7343
		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
			vcpu->run->hyperv = vcpu->arch.hyperv.exit;
			r = 0;
			goto out;
		}
7344 7345 7346 7347 7348 7349

		/*
		 * KVM_REQ_HV_STIMER has to be processed after
		 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
		 * depend on the guest clock being up-to-date
		 */
A
Andrey Smetanin 已提交
7350 7351
		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
			kvm_hv_process_stimers(vcpu);
7352
	}
A
Avi Kivity 已提交
7353

A
Avi Kivity 已提交
7354
	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
7355
		++vcpu->stat.req_event;
7356 7357 7358 7359 7360 7361
		kvm_apic_accept_events(vcpu);
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			r = 1;
			goto out;
		}

7362 7363
		if (inject_pending_event(vcpu, req_int_win) != 0)
			req_immediate_exit = true;
7364
		else {
7365
			/* Enable SMI/NMI/IRQ window open exits if needed.
7366
			 *
7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377
			 * SMIs have three cases:
			 * 1) They can be nested, and then there is nothing to
			 *    do here because RSM will cause a vmexit anyway.
			 * 2) There is an ISA-specific reason why SMI cannot be
			 *    injected, and the moment when this changes can be
			 *    intercepted.
			 * 3) Or the SMI can be pending because
			 *    inject_pending_event has completed the injection
			 *    of an IRQ or NMI from the previous vmexit, and
			 *    then we request an immediate exit to inject the
			 *    SMI.
7378 7379
			 */
			if (vcpu->arch.smi_pending && !is_smm(vcpu))
7380 7381
				if (!kvm_x86_ops->enable_smi_window(vcpu))
					req_immediate_exit = true;
7382 7383 7384 7385
			if (vcpu->arch.nmi_pending)
				kvm_x86_ops->enable_nmi_window(vcpu);
			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
				kvm_x86_ops->enable_irq_window(vcpu);
7386
			WARN_ON(vcpu->arch.exception.pending);
7387
		}
A
Avi Kivity 已提交
7388 7389 7390 7391 7392 7393 7394

		if (kvm_lapic_enabled(vcpu)) {
			update_cr8_intercept(vcpu);
			kvm_lapic_sync_to_vapic(vcpu);
		}
	}

7395 7396
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r)) {
7397
		goto cancel_injection;
7398 7399
	}

7400 7401 7402
	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
7403 7404 7405 7406 7407 7408 7409

	/*
	 * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
	 * IPI are then delayed after guest entry, which ensures that they
	 * result in virtual interrupt delivery.
	 */
	local_irq_disable();
7410 7411
	vcpu->mode = IN_GUEST_MODE;

7412 7413
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

7414
	/*
7415
	 * 1) We should set ->mode before checking ->requests.  Please see
7416
	 * the comment in kvm_vcpu_exiting_guest_mode().
7417 7418 7419 7420 7421 7422 7423 7424
	 *
	 * 2) For APICv, we should set ->mode before checking PIR.ON.  This
	 * pairs with the memory barrier implicit in pi_test_and_set_on
	 * (see vmx_deliver_posted_interrupt).
	 *
	 * 3) This also orders the write to mode from any reads to the page
	 * tables done while the VCPU is running.  Please see the comment
	 * in kvm_flush_remote_tlbs.
7425
	 */
7426
	smp_mb__after_srcu_read_unlock();
7427

7428 7429 7430 7431
	/*
	 * This handles the case where a posted interrupt was
	 * notified with kvm_vcpu_kick.
	 */
7432 7433
	if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
		kvm_x86_ops->sync_pir_to_irr(vcpu);
7434

R
Radim Krčmář 已提交
7435
	if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
A
Avi Kivity 已提交
7436
	    || need_resched() || signal_pending(current)) {
7437
		vcpu->mode = OUTSIDE_GUEST_MODE;
A
Avi Kivity 已提交
7438
		smp_wmb();
7439 7440
		local_irq_enable();
		preempt_enable();
7441
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
7442
		r = 1;
7443
		goto cancel_injection;
7444 7445
	}

7446 7447
	kvm_load_guest_xcr0(vcpu);

7448 7449
	if (req_immediate_exit) {
		kvm_make_request(KVM_REQ_EVENT, vcpu);
7450
		smp_send_reschedule(vcpu->cpu);
7451
	}
7452

7453
	trace_kvm_entry(vcpu->vcpu_id);
7454 7455
	if (lapic_timer_advance_ns)
		wait_lapic_expire(vcpu);
7456
	guest_enter_irqoff();
7457

7458 7459 7460 7461 7462 7463
	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
7464
		set_debugreg(vcpu->arch.dr6, 6);
7465
		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
7466
	}
7467

A
Avi Kivity 已提交
7468
	kvm_x86_ops->run(vcpu);
7469

7470 7471 7472 7473 7474 7475 7476 7477 7478
	/*
	 * Do this here before restoring debug registers on the host.  And
	 * since we do this before handling the vmexit, a DR access vmexit
	 * can (a) read the correct value of the debug registers, (b) set
	 * KVM_DEBUGREG_WONT_EXIT again.
	 */
	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
		kvm_x86_ops->sync_dirty_debug_regs(vcpu);
7479 7480 7481 7482
		kvm_update_dr0123(vcpu);
		kvm_update_dr6(vcpu);
		kvm_update_dr7(vcpu);
		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
7483 7484
	}

7485 7486 7487 7488 7489 7490 7491
	/*
	 * If the guest has used debug registers, at least dr7
	 * will be disabled while returning to the host.
	 * If we don't have active breakpoints in the host, we don't
	 * care about the messed up debug address registers. But if
	 * we have some of them active, restore the old state.
	 */
7492
	if (hw_breakpoint_active())
7493
		hw_breakpoint_restore();
7494

7495
	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
7496

7497
	vcpu->mode = OUTSIDE_GUEST_MODE;
A
Avi Kivity 已提交
7498
	smp_wmb();
7499

7500 7501
	kvm_put_guest_xcr0(vcpu);

7502
	kvm_before_interrupt(vcpu);
7503
	kvm_x86_ops->handle_external_intr(vcpu);
7504
	kvm_after_interrupt(vcpu);
7505 7506 7507

	++vcpu->stat.exits;

P
Paolo Bonzini 已提交
7508
	guest_exit_irqoff();
7509

P
Paolo Bonzini 已提交
7510
	local_irq_enable();
7511 7512
	preempt_enable();

7513
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
7514

7515 7516 7517 7518
	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
7519 7520
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
7521 7522
	}

7523 7524
	if (unlikely(vcpu->arch.tsc_always_catchup))
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
7525

7526 7527
	if (vcpu->arch.apic_attention)
		kvm_lapic_sync_from_vapic(vcpu);
A
Avi Kivity 已提交
7528

7529
	vcpu->arch.gpa_available = false;
A
Avi Kivity 已提交
7530
	r = kvm_x86_ops->handle_exit(vcpu);
7531 7532 7533 7534
	return r;

cancel_injection:
	kvm_x86_ops->cancel_injection(vcpu);
7535 7536
	if (unlikely(vcpu->arch.apic_attention))
		kvm_lapic_sync_from_vapic(vcpu);
7537 7538 7539
out:
	return r;
}
7540

7541 7542
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
7543 7544
	if (!kvm_arch_vcpu_runnable(vcpu) &&
	    (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
7545 7546 7547
		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
		kvm_vcpu_block(vcpu);
		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
7548 7549 7550 7551

		if (kvm_x86_ops->post_block)
			kvm_x86_ops->post_block(vcpu);

7552 7553 7554
		if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
			return 1;
	}
7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572

	kvm_apic_accept_events(vcpu);
	switch(vcpu->arch.mp_state) {
	case KVM_MP_STATE_HALTED:
		vcpu->arch.pv.pv_unhalted = false;
		vcpu->arch.mp_state =
			KVM_MP_STATE_RUNNABLE;
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.apf.halted = false;
		break;
	case KVM_MP_STATE_INIT_RECEIVED:
		break;
	default:
		return -EINTR;
		break;
	}
	return 1;
}
7573

7574 7575
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
7576 7577 7578
	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
		kvm_x86_ops->check_nested_events(vcpu, false);

7579 7580 7581 7582
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		!vcpu->arch.apf.halted);
}

7583
static int vcpu_run(struct kvm_vcpu *vcpu)
7584 7585
{
	int r;
7586
	struct kvm *kvm = vcpu->kvm;
7587

7588
	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
7589

7590
	for (;;) {
7591
		if (kvm_vcpu_running(vcpu)) {
A
Avi Kivity 已提交
7592
			r = vcpu_enter_guest(vcpu);
7593
		} else {
7594
			r = vcpu_block(kvm, vcpu);
7595 7596
		}

7597 7598 7599
		if (r <= 0)
			break;

7600
		kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
7601 7602 7603
		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

7604 7605
		if (dm_request_for_irq_injection(vcpu) &&
			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
7606 7607
			r = 0;
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
7608
			++vcpu->stat.request_irq_exits;
7609
			break;
7610
		}
7611 7612 7613

		kvm_check_async_pf_completion(vcpu);

7614 7615
		if (signal_pending(current)) {
			r = -EINTR;
A
Avi Kivity 已提交
7616
			vcpu->run->exit_reason = KVM_EXIT_INTR;
7617
			++vcpu->stat.signal_exits;
7618
			break;
7619 7620
		}
		if (need_resched()) {
7621
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
7622
			cond_resched();
7623
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
7624
		}
7625 7626
	}

7627
	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
7628 7629 7630 7631

	return r;
}

7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
	int r;
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (r != EMULATE_DONE)
		return 0;
	return 1;
}

static int complete_emulated_pio(struct kvm_vcpu *vcpu)
{
	BUG_ON(!vcpu->arch.pio.count);

	return complete_emulated_io(vcpu);
}

A
Avi Kivity 已提交
7650 7651 7652 7653 7654
/*
 * Implements the following, as a state machine:
 *
 * read:
 *   for each fragment
7655 7656 7657 7658
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       exit
 *       copy data
A
Avi Kivity 已提交
7659 7660 7661 7662
 *   execute insn
 *
 * write:
 *   for each fragment
7663 7664 7665 7666
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       copy data
 *       exit
A
Avi Kivity 已提交
7667
 */
7668
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
7669 7670
{
	struct kvm_run *run = vcpu->run;
A
Avi Kivity 已提交
7671
	struct kvm_mmio_fragment *frag;
7672
	unsigned len;
7673

7674
	BUG_ON(!vcpu->mmio_needed);
7675

7676
	/* Complete previous fragment */
7677 7678
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
7679
	if (!vcpu->mmio_is_write)
7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

7693
	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
7694
		vcpu->mmio_needed = 0;
7695 7696

		/* FIXME: return into emulator if single-stepping.  */
A
Avi Kivity 已提交
7697
		if (vcpu->mmio_is_write)
7698 7699 7700 7701
			return 1;
		vcpu->mmio_read_completed = 1;
		return complete_emulated_io(vcpu);
	}
7702

7703 7704 7705
	run->exit_reason = KVM_EXIT_MMIO;
	run->mmio.phys_addr = frag->gpa;
	if (vcpu->mmio_is_write)
7706 7707
		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
	run->mmio.len = min(8u, frag->len);
7708 7709 7710
	run->mmio.is_write = vcpu->mmio_is_write;
	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	return 0;
7711 7712
}

7713 7714 7715 7716
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

7717
	vcpu_load(vcpu);
7718
	kvm_sigset_activate(vcpu);
7719 7720
	kvm_load_guest_fpu(vcpu);

7721
	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
7722 7723 7724 7725
		if (kvm_run->immediate_exit) {
			r = -EINTR;
			goto out;
		}
7726
		kvm_vcpu_block(vcpu);
7727
		kvm_apic_accept_events(vcpu);
7728
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
7729
		r = -EAGAIN;
7730 7731 7732 7733 7734
		if (signal_pending(current)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
		}
7735
		goto out;
7736 7737
	}

K
Ken Hofsass 已提交
7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748
	if (vcpu->run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) {
		r = -EINVAL;
		goto out;
	}

	if (vcpu->run->kvm_dirty_regs) {
		r = sync_regs(vcpu);
		if (r != 0)
			goto out;
	}

7749
	/* re-sync apic's tpr */
7750
	if (!lapic_in_kernel(vcpu)) {
A
Andre Przywara 已提交
7751 7752 7753 7754 7755
		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
			r = -EINVAL;
			goto out;
		}
	}
7756

7757 7758 7759 7760 7761
	if (unlikely(vcpu->arch.complete_userspace_io)) {
		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
		vcpu->arch.complete_userspace_io = NULL;
		r = cui(vcpu);
		if (r <= 0)
7762
			goto out;
7763 7764
	} else
		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
7765

7766 7767 7768 7769
	if (kvm_run->immediate_exit)
		r = -EINTR;
	else
		r = vcpu_run(vcpu);
7770 7771

out:
7772
	kvm_put_guest_fpu(vcpu);
K
Ken Hofsass 已提交
7773 7774
	if (vcpu->run->kvm_valid_regs)
		store_regs(vcpu);
7775
	post_kvm_run_save(vcpu);
7776
	kvm_sigset_deactivate(vcpu);
7777

7778
	vcpu_put(vcpu);
7779 7780 7781
	return r;
}

K
Ken Hofsass 已提交
7782
static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
7783
{
7784 7785 7786 7787
	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
		/*
		 * We are here if userspace calls get_regs() in the middle of
		 * instruction emulation. Registers state needs to be copied
G
Guo Chao 已提交
7788
		 * back from emulation context to vcpu. Userspace shouldn't do
7789 7790 7791
		 * that usually, but some bad designed PV devices (vmware
		 * backdoor interface) need this to work
		 */
7792
		emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
7793 7794
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
	}
7795 7796 7797 7798 7799 7800 7801 7802
	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
7803
#ifdef CONFIG_X86_64
7804 7805 7806 7807 7808 7809 7810 7811
	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
7812 7813
#endif

7814
	regs->rip = kvm_rip_read(vcpu);
7815
	regs->rflags = kvm_get_rflags(vcpu);
K
Ken Hofsass 已提交
7816
}
7817

K
Ken Hofsass 已提交
7818 7819 7820 7821
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	__get_regs(vcpu, regs);
7822
	vcpu_put(vcpu);
7823 7824 7825
	return 0;
}

K
Ken Hofsass 已提交
7826
static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
7827
{
7828 7829 7830
	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;

7831 7832 7833 7834 7835 7836 7837 7838
	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
7839
#ifdef CONFIG_X86_64
7840 7841 7842 7843 7844 7845 7846 7847
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
7848 7849
#endif

7850
	kvm_rip_write(vcpu, regs->rip);
7851
	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
7852

7853 7854
	vcpu->arch.exception.pending = false;

7855
	kvm_make_request(KVM_REQ_EVENT, vcpu);
K
Ken Hofsass 已提交
7856
}
7857

K
Ken Hofsass 已提交
7858 7859 7860 7861
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	__set_regs(vcpu, regs);
7862
	vcpu_put(vcpu);
7863 7864 7865 7866 7867 7868 7869
	return 0;
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

7870
	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
7871 7872 7873 7874 7875
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

K
Ken Hofsass 已提交
7876
static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
7877
{
7878
	struct desc_ptr dt;
7879

7880 7881 7882 7883 7884 7885
	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
7886

7887 7888
	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
7889 7890

	kvm_x86_ops->get_idt(vcpu, &dt);
7891 7892
	sregs->idt.limit = dt.size;
	sregs->idt.base = dt.address;
7893
	kvm_x86_ops->get_gdt(vcpu, &dt);
7894 7895
	sregs->gdt.limit = dt.size;
	sregs->gdt.base = dt.address;
7896

7897
	sregs->cr0 = kvm_read_cr0(vcpu);
7898
	sregs->cr2 = vcpu->arch.cr2;
7899
	sregs->cr3 = kvm_read_cr3(vcpu);
7900
	sregs->cr4 = kvm_read_cr4(vcpu);
7901
	sregs->cr8 = kvm_get_cr8(vcpu);
7902
	sregs->efer = vcpu->arch.efer;
7903 7904
	sregs->apic_base = kvm_get_apic_base(vcpu);

G
Gleb Natapov 已提交
7905
	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
7906

7907
	if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
7908 7909
		set_bit(vcpu->arch.interrupt.nr,
			(unsigned long *)sregs->interrupt_bitmap);
K
Ken Hofsass 已提交
7910
}
7911

K
Ken Hofsass 已提交
7912 7913 7914 7915 7916
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	__get_sregs(vcpu, sregs);
7917
	vcpu_put(vcpu);
7918 7919 7920
	return 0;
}

7921 7922 7923
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
7924 7925
	vcpu_load(vcpu);

7926
	kvm_apic_accept_events(vcpu);
7927 7928 7929 7930 7931 7932
	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
					vcpu->arch.pv.pv_unhalted)
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
	else
		mp_state->mp_state = vcpu->arch.mp_state;

7933
	vcpu_put(vcpu);
7934 7935 7936 7937 7938 7939
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
7940 7941 7942 7943
	int ret = -EINVAL;

	vcpu_load(vcpu);

7944
	if (!lapic_in_kernel(vcpu) &&
7945
	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
7946
		goto out;
7947

7948 7949 7950 7951
	/* INITs are latched while in SMM */
	if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
	    (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
	     mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
7952
		goto out;
7953

7954 7955 7956 7957 7958
	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
	} else
		vcpu->arch.mp_state = mp_state->mp_state;
7959
	kvm_make_request(KVM_REQ_EVENT, vcpu);
7960 7961 7962 7963 7964

	ret = 0;
out:
	vcpu_put(vcpu);
	return ret;
7965 7966
}

7967 7968
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code)
7969
{
7970
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
7971
	int ret;
7972

7973
	init_emulate_ctxt(vcpu);
7974

7975
	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
7976
				   has_error_code, error_code);
7977 7978

	if (ret)
7979
		return EMULATE_FAIL;
7980

7981 7982
	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);
7983
	kvm_make_request(KVM_REQ_EVENT, vcpu);
7984
	return EMULATE_DONE;
7985 7986 7987
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

P
Peng Hao 已提交
7988
static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
7989
{
7990
	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
7991 7992 7993 7994 7995
		/*
		 * When EFER.LME and CR0.PG are set, the processor is in
		 * 64-bit mode (though maybe in a 32-bit code segment).
		 * CR4.PAE and EFER.LMA must be set.
		 */
7996
		if (!(sregs->cr4 & X86_CR4_PAE)
7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010
		    || !(sregs->efer & EFER_LMA))
			return -EINVAL;
	} else {
		/*
		 * Not in 64-bit mode: EFER.LMA is clear and the code
		 * segment cannot be 64-bit.
		 */
		if (sregs->efer & EFER_LMA || sregs->cs.l)
			return -EINVAL;
	}

	return 0;
}

K
Ken Hofsass 已提交
8011
static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
8012
{
8013
	struct msr_data apic_base_msr;
8014
	int mmu_reset_needed = 0;
8015
	int cpuid_update_needed = 0;
8016
	int pending_vec, max_bits, idx;
8017
	struct desc_ptr dt;
8018 8019
	int ret = -EINVAL;

8020 8021
	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
			(sregs->cr4 & X86_CR4_OSXSAVE))
8022
		goto out;
8023

8024
	if (kvm_valid_sregs(vcpu, sregs))
8025
		goto out;
8026

8027 8028 8029
	apic_base_msr.data = sregs->apic_base;
	apic_base_msr.host_initiated = true;
	if (kvm_set_apic_base(vcpu, &apic_base_msr))
8030
		goto out;
8031

8032 8033
	dt.size = sregs->idt.limit;
	dt.address = sregs->idt.base;
8034
	kvm_x86_ops->set_idt(vcpu, &dt);
8035 8036
	dt.size = sregs->gdt.limit;
	dt.address = sregs->gdt.base;
8037 8038
	kvm_x86_ops->set_gdt(vcpu, &dt);

8039
	vcpu->arch.cr2 = sregs->cr2;
8040
	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
8041
	vcpu->arch.cr3 = sregs->cr3;
8042
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
8043

8044
	kvm_set_cr8(vcpu, sregs->cr8);
8045

8046
	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
8047 8048
	kvm_x86_ops->set_efer(vcpu, sregs->efer);

8049
	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
8050
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
8051
	vcpu->arch.cr0 = sregs->cr0;
8052

8053
	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
8054 8055
	cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
				(X86_CR4_OSXSAVE | X86_CR4_PKE));
8056
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
8057
	if (cpuid_update_needed)
A
Avi Kivity 已提交
8058
		kvm_update_cpuid(vcpu);
8059 8060

	idx = srcu_read_lock(&vcpu->kvm->srcu);
8061
	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
8062
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
8063 8064
		mmu_reset_needed = 1;
	}
8065
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
8066 8067 8068 8069

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

8070
	max_bits = KVM_NR_INTERRUPTS;
G
Gleb Natapov 已提交
8071 8072 8073
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
8074
		kvm_queue_interrupt(vcpu, pending_vec, false);
G
Gleb Natapov 已提交
8075
		pr_debug("Set back pending irq %d\n", pending_vec);
8076 8077
	}

8078 8079 8080 8081 8082 8083
	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
8084

8085 8086
	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
8087

8088 8089
	update_cr8_intercept(vcpu);

M
Marcelo Tosatti 已提交
8090
	/* Older userspace won't unhalt the vcpu on reset. */
8091
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
M
Marcelo Tosatti 已提交
8092
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
8093
	    !is_protmode(vcpu))
M
Marcelo Tosatti 已提交
8094 8095
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

8096 8097
	kvm_make_request(KVM_REQ_EVENT, vcpu);

8098 8099
	ret = 0;
out:
K
Ken Hofsass 已提交
8100 8101 8102 8103 8104 8105 8106 8107 8108 8109
	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = __set_sregs(vcpu, sregs);
8110 8111
	vcpu_put(vcpu);
	return ret;
8112 8113
}

J
Jan Kiszka 已提交
8114 8115
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
8116
{
8117
	unsigned long rflags;
8118
	int i, r;
8119

8120 8121
	vcpu_load(vcpu);

8122 8123 8124
	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (vcpu->arch.exception.pending)
8125
			goto out;
8126 8127 8128 8129 8130 8131
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

8132 8133 8134 8135 8136
	/*
	 * Read rflags as long as potentially injected trace flags are still
	 * filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);
8137 8138 8139 8140 8141 8142

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
8143 8144
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
8145
		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
8146 8147 8148 8149
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
8150
	kvm_update_dr7(vcpu);
8151

J
Jan Kiszka 已提交
8152 8153 8154
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
			get_segment_base(vcpu, VCPU_SREG_CS);
8155

8156 8157 8158 8159 8160
	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);
8161

8162
	kvm_x86_ops->update_bp_intercept(vcpu);
8163

8164
	r = 0;
J
Jan Kiszka 已提交
8165

8166
out:
8167
	vcpu_put(vcpu);
8168 8169 8170
	return r;
}

8171 8172 8173 8174 8175 8176 8177 8178
/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
8179
	int idx;
8180

8181 8182
	vcpu_load(vcpu);

8183
	idx = srcu_read_lock(&vcpu->kvm->srcu);
8184
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
8185
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
8186 8187 8188 8189 8190
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;

8191
	vcpu_put(vcpu);
8192 8193 8194
	return 0;
}

8195 8196
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
8197
	struct fxregs_state *fxsave;
8198

8199
	vcpu_load(vcpu);
8200

8201
	fxsave = &vcpu->arch.guest_fpu.state.fxsave;
8202 8203 8204 8205 8206 8207 8208 8209 8210
	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

8211
	vcpu_put(vcpu);
8212 8213 8214 8215 8216
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
8217 8218 8219 8220 8221
	struct fxregs_state *fxsave;

	vcpu_load(vcpu);

	fxsave = &vcpu->arch.guest_fpu.state.fxsave;
8222 8223 8224 8225 8226 8227 8228 8229 8230 8231

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

8232
	vcpu_put(vcpu);
8233 8234 8235
	return 0;
}

K
Ken Hofsass 已提交
8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274
static void store_regs(struct kvm_vcpu *vcpu)
{
	BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
		__get_regs(vcpu, &vcpu->run->s.regs.regs);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
		__get_sregs(vcpu, &vcpu->run->s.regs.sregs);

	if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
		kvm_vcpu_ioctl_x86_get_vcpu_events(
				vcpu, &vcpu->run->s.regs.events);
}

static int sync_regs(struct kvm_vcpu *vcpu)
{
	if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)
		return -EINVAL;

	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
		__set_regs(vcpu, &vcpu->run->s.regs.regs);
		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
	}
	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
		if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs))
			return -EINVAL;
		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
	}
	if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
		if (kvm_vcpu_ioctl_x86_set_vcpu_events(
				vcpu, &vcpu->run->s.regs.events))
			return -EINVAL;
		vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
	}

	return 0;
}

I
Ingo Molnar 已提交
8275
static void fx_init(struct kvm_vcpu *vcpu)
8276
{
8277
	fpstate_init(&vcpu->arch.guest_fpu.state);
8278
	if (boot_cpu_has(X86_FEATURE_XSAVES))
8279
		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
8280
			host_xcr0 | XSTATE_COMPACTION_ENABLED;
8281

8282 8283 8284
	/*
	 * Ensure guest xcr0 is valid for loading
	 */
D
Dave Hansen 已提交
8285
	vcpu->arch.xcr0 = XFEATURE_MASK_FP;
8286

8287
	vcpu->arch.cr0 |= X86_CR0_ET;
8288 8289
}

8290
/* Swap (qemu) user FPU context for the guest FPU context. */
8291 8292
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
8293 8294
	preempt_disable();
	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
8295 8296 8297
	/* PKRU is separately restored in kvm_x86_ops->run.  */
	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
				~XFEATURE_MASK_PKRU);
8298
	preempt_enable();
8299
	trace_kvm_fpu(1);
8300 8301
}

8302
/* When vcpu_run ends, restore user space FPU context. */
8303 8304
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
8305
	preempt_disable();
8306
	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
8307 8308
	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
	preempt_enable();
A
Avi Kivity 已提交
8309
	++vcpu->stat.fpu_reload;
8310
	trace_kvm_fpu(0);
8311
}
8312 8313 8314

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
8315 8316
	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;

8317
	kvmclock_reset(vcpu);
8318

8319
	kvm_x86_ops->vcpu_free(vcpu);
8320
	free_cpumask_var(wbinvd_dirty_mask);
8321 8322 8323 8324 8325
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
						unsigned int id)
{
8326 8327
	struct kvm_vcpu *vcpu;

8328
	if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
Z
Zachary Amsden 已提交
8329 8330 8331
		printk_once(KERN_WARNING
		"kvm: SMP vm created on host with unstable TSC; "
		"guest TSC will not be reliable\n");
8332 8333 8334 8335

	vcpu = kvm_x86_ops->vcpu_create(kvm, id);

	return vcpu;
8336
}
8337

8338 8339
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
X
Xiao Guangrong 已提交
8340
	kvm_vcpu_mtrr_init(vcpu);
8341
	vcpu_load(vcpu);
8342
	kvm_vcpu_reset(vcpu, false);
8343
	kvm_mmu_setup(vcpu);
8344
	vcpu_put(vcpu);
8345
	return 0;
8346 8347
}

8348
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
8349
{
8350
	struct msr_data msr;
8351
	struct kvm *kvm = vcpu->kvm;
8352

8353 8354
	kvm_hv_vcpu_postcreate(vcpu);

8355
	if (mutex_lock_killable(&vcpu->mutex))
8356
		return;
8357
	vcpu_load(vcpu);
8358 8359 8360 8361
	msr.data = 0x0;
	msr.index = MSR_IA32_TSC;
	msr.host_initiated = true;
	kvm_write_tsc(vcpu, &msr);
8362
	vcpu_put(vcpu);
8363
	mutex_unlock(&vcpu->mutex);
8364

8365 8366 8367
	if (!kvmclock_periodic_sync)
		return;

8368 8369
	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
					KVMCLOCK_SYNC_PERIOD);
8370 8371
}

8372
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
8373
{
8374 8375
	vcpu->arch.apf.msr_val = 0;

8376
	vcpu_load(vcpu);
8377 8378 8379 8380 8381 8382
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

8383
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
8384
{
8385 8386
	kvm_lapic_reset(vcpu, init_event);

8387 8388
	vcpu->arch.hflags = 0;

8389
	vcpu->arch.smi_pending = 0;
8390
	vcpu->arch.smi_count = 0;
A
Avi Kivity 已提交
8391 8392
	atomic_set(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = 0;
8393
	vcpu->arch.nmi_injected = false;
8394 8395
	kvm_clear_interrupt_queue(vcpu);
	kvm_clear_exception_queue(vcpu);
8396
	vcpu->arch.exception.pending = false;
8397

8398
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
8399
	kvm_update_dr0123(vcpu);
8400
	vcpu->arch.dr6 = DR6_INIT;
J
Jan Kiszka 已提交
8401
	kvm_update_dr6(vcpu);
8402
	vcpu->arch.dr7 = DR7_FIXED_1;
8403
	kvm_update_dr7(vcpu);
8404

N
Nadav Amit 已提交
8405 8406
	vcpu->arch.cr2 = 0;

8407
	kvm_make_request(KVM_REQ_EVENT, vcpu);
8408
	vcpu->arch.apf.msr_val = 0;
G
Glauber Costa 已提交
8409
	vcpu->arch.st.msr_val = 0;
8410

8411 8412
	kvmclock_reset(vcpu);

8413 8414 8415
	kvm_clear_async_pf_completion_queue(vcpu);
	kvm_async_pf_hash_reset(vcpu);
	vcpu->arch.apf.halted = false;
8416

8417 8418 8419 8420 8421 8422 8423
	if (kvm_mpx_supported()) {
		void *mpx_state_buffer;

		/*
		 * To avoid have the INIT path from kvm_apic_has_events() that be
		 * called with loaded FPU and does not let userspace fix the state.
		 */
8424 8425
		if (init_event)
			kvm_put_guest_fpu(vcpu);
8426 8427 8428 8429 8430 8431 8432 8433
		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
					XFEATURE_MASK_BNDREGS);
		if (mpx_state_buffer)
			memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state));
		mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
					XFEATURE_MASK_BNDCSR);
		if (mpx_state_buffer)
			memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
8434 8435
		if (init_event)
			kvm_load_guest_fpu(vcpu);
8436 8437
	}

P
Paolo Bonzini 已提交
8438
	if (!init_event) {
8439
		kvm_pmu_reset(vcpu);
P
Paolo Bonzini 已提交
8440
		vcpu->arch.smbase = 0x30000;
K
Kyle Huey 已提交
8441 8442 8443

		vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
		vcpu->arch.msr_misc_features_enables = 0;
8444 8445

		vcpu->arch.xcr0 = XFEATURE_MASK_FP;
P
Paolo Bonzini 已提交
8446
	}
8447

8448 8449 8450 8451
	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

8452 8453
	vcpu->arch.ia32_xss = 0;

8454
	kvm_x86_ops->vcpu_reset(vcpu, init_event);
8455 8456
}

8457
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
8458 8459 8460 8461 8462 8463 8464 8465
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs.selector = vector << 8;
	cs.base = vector << 12;
	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_rip_write(vcpu, 0);
8466 8467
}

8468
int kvm_arch_hardware_enable(void)
8469
{
8470 8471 8472
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
8473 8474 8475 8476
	int ret;
	u64 local_tsc;
	u64 max_tsc = 0;
	bool stable, backwards_tsc = false;
A
Avi Kivity 已提交
8477 8478

	kvm_shared_msr_cpu_online();
8479
	ret = kvm_x86_ops->hardware_enable();
8480 8481 8482
	if (ret != 0)
		return ret;

8483
	local_tsc = rdtsc();
8484
	stable = !kvm_check_tsc_unstable();
8485 8486 8487
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!stable && vcpu->cpu == smp_processor_id())
8488
				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504
			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
				backwards_tsc = true;
				if (vcpu->arch.last_host_tsc > max_tsc)
					max_tsc = vcpu->arch.last_host_tsc;
			}
		}
	}

	/*
	 * Sometimes, even reliable TSCs go backwards.  This happens on
	 * platforms that reset TSC during suspend or hibernate actions, but
	 * maintain synchronization.  We must compensate.  Fortunately, we can
	 * detect that condition here, which happens early in CPU bringup,
	 * before any KVM threads can be running.  Unfortunately, we can't
	 * bring the TSCs fully up to date with real time, as we aren't yet far
	 * enough into CPU bringup that we know how much real time has actually
8505
	 * elapsed; our helper function, ktime_get_boot_ns() will be using boot
8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529
	 * variables that haven't been updated yet.
	 *
	 * So we simply find the maximum observed TSC above, then record the
	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
	 * the adjustment will be applied.  Note that we accumulate
	 * adjustments, in case multiple suspend cycles happen before some VCPU
	 * gets a chance to run again.  In the event that no KVM threads get a
	 * chance to run, we will miss the entire elapsed period, as we'll have
	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
	 * loose cycle time.  This isn't too big a deal, since the loss will be
	 * uniform across all VCPUs (not to mention the scenario is extremely
	 * unlikely). It is possible that a second hibernate recovery happens
	 * much faster than a first, causing the observed TSC here to be
	 * smaller; this would require additional padding adjustment, which is
	 * why we set last_host_tsc to the local tsc observed here.
	 *
	 * N.B. - this code below runs only on platforms with reliable TSC,
	 * as that is the only way backwards_tsc is set above.  Also note
	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
	 * have the same delta_cyc adjustment applied if backwards_tsc
	 * is detected.  Note further, this adjustment is only done once,
	 * as we reset last_host_tsc on all VCPUs to stop this from being
	 * called multiple times (one for each physical CPU bringup).
	 *
G
Guo Chao 已提交
8530
	 * Platforms with unreliable TSCs don't have to deal with this, they
8531 8532 8533 8534 8535 8536 8537
	 * will be compensated by the logic in vcpu_load, which sets the TSC to
	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
	 * guarantee that they stay in perfect synchronization.
	 */
	if (backwards_tsc) {
		u64 delta_cyc = max_tsc - local_tsc;
		list_for_each_entry(kvm, &vm_list, vm_list) {
8538
			kvm->arch.backwards_tsc_observed = true;
8539 8540 8541
			kvm_for_each_vcpu(i, vcpu, kvm) {
				vcpu->arch.tsc_offset_adjustment += delta_cyc;
				vcpu->arch.last_host_tsc = local_tsc;
8542
				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556
			}

			/*
			 * We have to disable TSC offset matching.. if you were
			 * booting a VM while issuing an S4 host suspend....
			 * you may have some problem.  Solving this issue is
			 * left as an exercise to the reader.
			 */
			kvm->arch.last_tsc_nsec = 0;
			kvm->arch.last_tsc_write = 0;
		}

	}
	return 0;
8557 8558
}

8559
void kvm_arch_hardware_disable(void)
8560
{
8561 8562
	kvm_x86_ops->hardware_disable();
	drop_user_return_notifiers();
8563 8564 8565 8566
}

int kvm_arch_hardware_setup(void)
{
8567 8568 8569 8570 8571 8572
	int r;

	r = kvm_x86_ops->hardware_setup();
	if (r != 0)
		return r;

8573 8574 8575 8576
	if (kvm_has_tsc_control) {
		/*
		 * Make sure the user can only configure tsc_khz values that
		 * fit into a signed integer.
8577
		 * A min value is not calculated because it will always
8578 8579 8580 8581 8582 8583
		 * be 1 on all machines.
		 */
		u64 max = min(0x7fffffffULL,
			      __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
		kvm_max_guest_tsc_khz = max;

8584
		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
8585
	}
8586

8587 8588
	kvm_init_msr_list();
	return 0;
8589 8590 8591 8592 8593 8594 8595 8596 8597 8598
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609
}

bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);

bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
8610 8611
}

8612
struct static_key kvm_no_apic_vcpu __read_mostly;
8613
EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
8614

8615 8616 8617 8618 8619
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int r;

8620
	vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
8621
	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
8622
	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
8623
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
8624
	else
8625
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
8626 8627 8628 8629 8630 8631

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
8632
	vcpu->arch.pio_data = page_address(page);
8633

8634
	kvm_set_tsc_khz(vcpu, max_tsc_khz);
Z
Zachary Amsden 已提交
8635

8636 8637 8638 8639
	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

8640
	if (irqchip_in_kernel(vcpu->kvm)) {
8641 8642 8643
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
8644 8645
	} else
		static_key_slow_inc(&kvm_no_apic_vcpu);
8646

H
Huang Ying 已提交
8647 8648 8649 8650
	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
8651
		goto fail_free_lapic;
H
Huang Ying 已提交
8652 8653 8654
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

8655 8656
	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
		r = -ENOMEM;
8657
		goto fail_free_mce_banks;
8658
	}
8659

I
Ingo Molnar 已提交
8660
	fx_init(vcpu);
8661

8662
	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
8663

8664 8665
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

8666 8667
	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;

8668
	kvm_async_pf_hash_reset(vcpu);
8669
	kvm_pmu_init(vcpu);
8670

8671
	vcpu->arch.pending_external_vector = -1;
8672
	vcpu->arch.preempted_in_kernel = false;
8673

8674 8675
	kvm_hv_vcpu_init(vcpu);

8676
	return 0;
I
Ingo Molnar 已提交
8677

8678 8679
fail_free_mce_banks:
	kfree(vcpu->arch.mce_banks);
8680 8681
fail_free_lapic:
	kvm_free_lapic(vcpu);
8682 8683 8684
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
8685
	free_page((unsigned long)vcpu->arch.pio_data);
8686 8687 8688 8689 8690 8691
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
8692 8693
	int idx;

A
Andrey Smetanin 已提交
8694
	kvm_hv_vcpu_uninit(vcpu);
8695
	kvm_pmu_destroy(vcpu);
8696
	kfree(vcpu->arch.mce_banks);
8697
	kvm_free_lapic(vcpu);
8698
	idx = srcu_read_lock(&vcpu->kvm->srcu);
8699
	kvm_mmu_destroy(vcpu);
8700
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
8701
	free_page((unsigned long)vcpu->arch.pio_data);
8702
	if (!lapic_in_kernel(vcpu))
8703
		static_key_slow_dec(&kvm_no_apic_vcpu);
8704
}
8705

R
Radim Krčmář 已提交
8706 8707
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
8708
	kvm_x86_ops->sched_in(vcpu, cpu);
R
Radim Krčmář 已提交
8709 8710
}

8711
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
8712
{
8713 8714 8715
	if (type)
		return -EINVAL;

8716
	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
8717
	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
8718
	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
B
Ben-Ami Yassour 已提交
8719
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
8720
	atomic_set(&kvm->arch.noncoherent_dma_count, 0);
8721

8722 8723
	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
8724 8725 8726
	/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
	set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		&kvm->arch.irq_sources_bitmap);
8727

8728
	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
8729
	mutex_init(&kvm->arch.apic_map_lock);
8730 8731
	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);

8732
	kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
8733
	pvclock_update_vm_gtod_copy(kvm);
8734

8735
	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
8736
	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
8737

8738
	kvm_hv_init_vm(kvm);
8739
	kvm_page_track_init(kvm);
8740
	kvm_mmu_init_vm(kvm);
8741

8742 8743 8744
	if (kvm_x86_ops->vm_init)
		return kvm_x86_ops->vm_init(kvm);

8745
	return 0;
8746 8747 8748 8749
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
8750
	vcpu_load(vcpu);
8751 8752 8753 8754 8755 8756 8757
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
8758
	struct kvm_vcpu *vcpu;
8759 8760 8761 8762

	/*
	 * Unpin any mmu pages first.
	 */
8763 8764
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_clear_async_pf_completion_queue(vcpu);
8765
		kvm_unload_vcpu_mmu(vcpu);
8766
	}
8767 8768 8769 8770 8771 8772
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;
8773

8774 8775
	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
8776 8777
}

8778 8779
void kvm_arch_sync_events(struct kvm *kvm)
{
8780
	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
8781
	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
8782
	kvm_free_pit(kvm);
8783 8784
}

8785
int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
8786 8787
{
	int i, r;
8788
	unsigned long hva;
8789 8790
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *slot, old;
8791 8792

	/* Called with kvm->slots_lock held.  */
8793 8794
	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
		return -EINVAL;
8795

8796 8797
	slot = id_to_memslot(slots, id);
	if (size) {
8798
		if (slot->npages)
8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816
			return -EEXIST;

		/*
		 * MAP_SHARED to prevent internal slot pages from being moved
		 * by fork()/COW.
		 */
		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
			      MAP_SHARED | MAP_ANONYMOUS, 0);
		if (IS_ERR((void *)hva))
			return PTR_ERR((void *)hva);
	} else {
		if (!slot->npages)
			return 0;

		hva = 0;
	}

	old = *slot;
8817
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
8818
		struct kvm_userspace_memory_region m;
8819

8820 8821 8822
		m.slot = id | (i << 16);
		m.flags = 0;
		m.guest_phys_addr = gpa;
8823
		m.userspace_addr = hva;
8824
		m.memory_size = size;
8825 8826 8827 8828 8829
		r = __kvm_set_memory_region(kvm, &m);
		if (r < 0)
			return r;
	}

8830 8831
	if (!size)
		vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
8832

8833 8834 8835 8836
	return 0;
}
EXPORT_SYMBOL_GPL(__x86_set_memory_region);

8837
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
8838 8839 8840 8841
{
	int r;

	mutex_lock(&kvm->slots_lock);
8842
	r = __x86_set_memory_region(kvm, id, gpa, size);
8843 8844 8845 8846 8847 8848
	mutex_unlock(&kvm->slots_lock);

	return r;
}
EXPORT_SYMBOL_GPL(x86_set_memory_region);

8849 8850
void kvm_arch_destroy_vm(struct kvm *kvm)
{
8851 8852 8853 8854 8855 8856
	if (current->mm == kvm->mm) {
		/*
		 * Free memory regions allocated on behalf of userspace,
		 * unless the the memory map has changed due to process exit
		 * or fd copying.
		 */
8857 8858 8859
		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
8860
	}
8861 8862
	if (kvm_x86_ops->vm_destroy)
		kvm_x86_ops->vm_destroy(kvm);
8863 8864
	kvm_pic_destroy(kvm);
	kvm_ioapic_destroy(kvm);
8865
	kvm_free_vcpus(kvm);
8866
	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
8867
	kvm_mmu_uninit_vm(kvm);
8868
	kvm_page_track_cleanup(kvm);
8869
	kvm_hv_destroy_vm(kvm);
8870
}
8871

8872
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
8873 8874 8875 8876
			   struct kvm_memory_slot *dont)
{
	int i;

8877 8878
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
T
Thomas Huth 已提交
8879
			kvfree(free->arch.rmap[i]);
8880
			free->arch.rmap[i] = NULL;
8881
		}
8882 8883 8884 8885 8886
		if (i == 0)
			continue;

		if (!dont || free->arch.lpage_info[i - 1] !=
			     dont->arch.lpage_info[i - 1]) {
T
Thomas Huth 已提交
8887
			kvfree(free->arch.lpage_info[i - 1]);
8888
			free->arch.lpage_info[i - 1] = NULL;
8889 8890
		}
	}
8891 8892

	kvm_page_track_free_memslot(free, dont);
8893 8894
}

8895 8896
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
8897 8898 8899
{
	int i;

8900
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
8901
		struct kvm_lpage_info *linfo;
8902 8903
		unsigned long ugfn;
		int lpages;
8904
		int level = i + 1;
8905 8906 8907 8908

		lpages = gfn_to_index(slot->base_gfn + npages - 1,
				      slot->base_gfn, level) + 1;

8909
		slot->arch.rmap[i] =
K
Kees Cook 已提交
8910 8911
			kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
				 GFP_KERNEL);
8912
		if (!slot->arch.rmap[i])
8913
			goto out_free;
8914 8915
		if (i == 0)
			continue;
8916

K
Kees Cook 已提交
8917
		linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL);
8918
		if (!linfo)
8919 8920
			goto out_free;

8921 8922
		slot->arch.lpage_info[i - 1] = linfo;

8923
		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
8924
			linfo[0].disallow_lpage = 1;
8925
		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
8926
			linfo[lpages - 1].disallow_lpage = 1;
8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937
		ugfn = slot->userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !kvm_largepages_enabled()) {
			unsigned long j;

			for (j = 0; j < lpages; ++j)
				linfo[j].disallow_lpage = 1;
		}
	}

	if (kvm_page_track_create_memslot(slot, npages))
		goto out_free;

	return 0;

out_free:
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		kvfree(slot->arch.rmap[i]);
		slot->arch.rmap[i] = NULL;
		if (i == 0)
			continue;

		kvfree(slot->arch.lpage_info[i - 1]);
		slot->arch.lpage_info[i - 1] = NULL;
	}
	return -ENOMEM;
}

void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
{
	/*
	 * memslots->generation has been incremented.
	 * mmio generation may have reached its maximum value.
	 */
	kvm_mmu_invalidate_mmio_sptes(kvm, slots);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change)
{
	return 0;
}

static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
				     struct kvm_memory_slot *new)
{
	/* Still write protect RO slot */
	if (new->flags & KVM_MEM_READONLY) {
		kvm_mmu_slot_remove_write_access(kvm, new);
		return;
	}

	/*
	 * Call kvm_x86_ops dirty logging hooks when they are valid.
	 *
	 * kvm_x86_ops->slot_disable_log_dirty is called when:
	 *
	 *  - KVM_MR_CREATE with dirty logging is disabled
	 *  - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
	 *
	 * The reason is, in case of PML, we need to set D-bit for any slots
	 * with dirty logging disabled in order to eliminate unnecessary GPA
	 * logging in PML buffer (and potential PML buffer full VMEXIT). This
	 * guarantees leaving PML enabled during the guest's lifetime won't have
	 * any additional overhead from PML when guest is running with dirty
	 * logging disabled for memory slots.
	 *
	 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
	 * to dirty logging mode.
	 *
	 * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
	 *
	 * In case of write protect:
	 *
	 * Write protect all pages for dirty logging.
	 *
	 * All the sptes including the large sptes which point to this
	 * slot are set to readonly. We can not create any new large
	 * spte on this slot until the end of the logging.
	 *
	 * See the comments in fast_page_fault().
	 */
	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
		if (kvm_x86_ops->slot_enable_log_dirty)
			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
		else
			kvm_mmu_slot_remove_write_access(kvm, new);
	} else {
		if (kvm_x86_ops->slot_disable_log_dirty)
			kvm_x86_ops->slot_disable_log_dirty(kvm, new);
	}
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int nr_mmu_pages = 0;

	if (!kvm->arch.n_requested_mmu_pages)
		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);

	if (nr_mmu_pages)
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);

	/*
	 * Dirty logging tracks sptes in 4k granularity, meaning that large
	 * sptes have to be split.  If live migration is successful, the guest
	 * in the source machine will be destroyed and large sptes will be
	 * created in the destination. However, if the guest continues to run
	 * in the source machine (for example if live migration fails), small
	 * sptes will remain around and cause bad performance.
	 *
	 * Scan sptes if dirty logging has been stopped, dropping those
	 * which can be collapsed into a single large-page spte.  Later
	 * page faults will create the large-page sptes.
	 */
	if ((change != KVM_MR_DELETE) &&
		(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
		!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
		kvm_mmu_zap_collapsible_sptes(kvm, new);

	/*
	 * Set up write protection and/or dirty logging for the new slot.
	 *
	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old slot
	 * have been zapped, so no dirty logging work is needed for the old
	 * slot. For KVM_MR_FLAGS_ONLY, the old slot is essentially the same
	 * one as the new and it's also covered when dealing with the new slot.
	 *
	 * FIXME: const-ify all uses of struct kvm_memory_slot.
	 */
	if (change != KVM_MR_DELETE)
		kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_mmu_invalidate_zap_all_pages(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvm_page_track_flush_slot(kvm, slot);
}

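/*
 * Check whether the vCPU has any event pending (exception, NMI, SMI,
 * external interrupt, Hyper-V synthetic timer, completed async page
 * fault, ...) that should make it runnable.
 */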
9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
	if (!list_empty_careful(&vcpu->async_pf.done))
		return true;

	if (kvm_apic_has_events(vcpu))
		return true;

	if (vcpu->arch.pv.pv_unhalted)
		return true;

	if (vcpu->arch.exception.pending)
		return true;

	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
	    (vcpu->arch.nmi_pending &&
	     kvm_x86_ops->nmi_allowed(vcpu)))
		return true;

	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
	    (vcpu->arch.smi_pending && !is_smm(vcpu)))
		return true;

	if (kvm_arch_interrupt_allowed(vcpu) &&
	    kvm_cpu_has_interrupt(vcpu))
		return true;

	if (kvm_hv_has_stimer_pending(vcpu))
		return true;

	return false;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.preempted_in_kernel;
}

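/*
 * Only a vCPU that is currently executing guest code needs a kick (IPI);
 * a vCPU outside guest mode will notice pending requests before its next
 * VM entry.
 */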
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

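/*
 * Return the current RIP as a linear address: RIP is already linear in
 * 64-bit mode, otherwise the CS segment base has to be added.
 */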
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{
	if (is_64_bit_mode(vcpu))
		return kvm_rip_read(vcpu);
	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
		     kvm_rip_read(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_linear_rip);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	return kvm_get_linear_rip(vcpu) == linear_rip;
J
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
}

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__kvm_set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	int r;

	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
	      work->wakeup_all)
		return;

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		return;

	if (!vcpu->arch.mmu.direct_map &&
	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
		return;

	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
}

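/*
 * vcpu->arch.apf.gfns[] is a small open-addressing hash table (linear
 * probing, ~0 marks an empty slot) recording guest frames for which an
 * async "page not present" fault has been reported but not yet completed.
 */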
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}

static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
}

static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}

static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
		key = kvm_async_pf_next_probe(key);

	return key;
}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}

static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;
			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or  |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}

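/*
 * apf_put_user()/apf_get_user() access the 32-bit reason word in the
 * shared area the guest registered via the async-PF MSR
 * (vcpu->arch.apf.data); it tells the guest whether a token means
 * "page not present" or "page ready".
 */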
static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
				      sizeof(val));
}

static int apf_get_user(struct kvm_vcpu *vcpu, u32 *val)
{

	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val,
				      sizeof(u32));
}

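/*
 * A guest page was not resident and the fault is handled asynchronously:
 * either halt the vCPU until the page shows up or, if the guest enabled
 * async page faults, inject a #PF that carries the token instead.
 */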
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
	    (vcpu->arch.apf.send_user_only &&
	     kvm_x86_ops->get_cpl(vcpu) == 0))
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		fault.async_page_fault = true;
		kvm_inject_page_fault(vcpu, &fault);
	}
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct x86_exception fault;
	u32 val;

	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
	trace_kvm_async_pf_ready(work->arch.token, work->gva);

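	/*
	 * If the corresponding "page not present" fault is still pending
	 * (the guest has not seen it yet), cancel it and clear the reason
	 * word instead of delivering a stale pair of notifications;
	 * otherwise report "page ready" via another #PF with the same token.
	 */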
	if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
	    !apf_get_user(vcpu, &val)) {
		if (val == KVM_PV_REASON_PAGE_NOT_PRESENT &&
		    vcpu->arch.exception.pending &&
		    vcpu->arch.exception.nr == PF_VECTOR &&
		    !apf_put_user(vcpu, 0)) {
			vcpu->arch.exception.injected = false;
			vcpu->arch.exception.pending = false;
			vcpu->arch.exception.nr = 0;
			vcpu->arch.exception.has_error_code = false;
			vcpu->arch.exception.error_code = 0;
		} else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
			fault.vector = PF_VECTOR;
			fault.error_code_valid = true;
			fault.error_code = 0;
			fault.nested_page_fault = false;
			fault.address = work->arch.token;
			fault.async_page_fault = true;
			kvm_inject_page_fault(vcpu, &fault);
		}
	}
	vcpu->arch.apf.halted = false;
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return true;
	else
		return kvm_can_do_async_pf(vcpu);
}

void kvm_arch_start_assignment(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);

void kvm_arch_end_assignment(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);

bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);

void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

bool kvm_arch_has_irq_bypass(void)
{
	return kvm_x86_ops->update_pi_irte != NULL;
}

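/*
 * An IRQ bypass producer (e.g. a VFIO device interrupt) is being paired
 * with this irqfd consumer: update the posted-interrupt IRTE so the
 * interrupt can be delivered to the guest without going through the host.
 */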
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	irqfd->producer = prod;

	return kvm_x86_ops->update_pi_irte(irqfd->kvm,
					   prod->irq, irqfd->gsi, 1);
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	int ret;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	WARN_ON(irqfd->producer != prod);
	irqfd->producer = NULL;

	/*
	 * When the producer of a consumer is unregistered, we change back to
	 * remapped mode, so we can re-use the current implementation
	 * when the irq is masked/disabled or the consumer side (KVM
	 * in this case) doesn't want to receive the interrupts.
	 */
	ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
	if (ret)
		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
		       " fails: %d\n", irqfd->consumer.token, ret);
}

int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				   uint32_t guest_irq, bool set)
{
	if (!kvm_x86_ops->update_pi_irte)
		return -EINVAL;

	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
}

bool kvm_vector_hashing_enabled(void)
{
	return vector_hashing;
}
EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);