/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "assigned-dev.h"
#include "pmu.h"
#include "hyperv.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/internal.h> /* Ugh! */
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);

#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)

/* EFER defaults:
 * - enable syscall per default because it's emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
                                    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);

struct kvm_x86_ops *kvm_x86_ops __read_mostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

unsigned int min_timer_period_us = 500;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

bool __read_mostly kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32  __read_mostly kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
u8   __read_mostly kvm_tsc_scaling_ratio_frac_bits;
EXPORT_SYMBOL_GPL(kvm_tsc_scaling_ratio_frac_bits);
u64  __read_mostly kvm_max_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_max_tsc_scaling_ratio);
u64 __read_mostly kvm_default_tsc_scaling_ratio;
EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);

/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

/* lapic timer advance (tscdeadline mode only) in nanoseconds */
unsigned int __read_mostly lapic_timer_advance_ns = 0;
module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);

static bool __read_mostly backwards_tsc_observed = false;

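/*
 * "Shared MSRs" are MSRs (e.g. the SYSCALL/SYSENTER family) whose host
 * values never change while the kernel runs, so they do not need to be
 * restored on every vmexit.  Instead, the guest values are left in place
 * and a user-return notifier writes the saved host values back only when
 * the CPU actually returns to userspace.
 */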
#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

u64 __read_mostly host_xcr0;

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
	int i;
	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
		vcpu->arch.apf.gfns[i] = ~0;
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
	u64 value;
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	/* shared_msrs_global.nr is only read here, and nobody should be
	 * modifying it at this time, so no lock is needed */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	BUG_ON(slot >= KVM_NR_SHARED_MSRS);
	shared_msrs_global.msrs[slot] = msr;
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
	int err;

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return 0;
	smsr->values[slot].curr = value;
	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
	if (err)
		return 1;

	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void)
{
	unsigned int cpu = smp_processor_id();
	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

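/*
 * Validate an APIC-base MSR write before applying it: reserved bits must
 * be clear, EXTD (x2APIC) cannot be set while the APIC is globally
 * disabled, and the transitions the SDM forbids (x2APIC directly back to
 * xAPIC, or disabled directly to x2APIC) are rejected.  Host-initiated
 * writes bypass these checks.
 */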
int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	u64 old_state = vcpu->arch.apic_base &
		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
	u64 new_state = msr_info->data &
		(MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
	u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
		0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);

	if (!msr_info->host_initiated &&
	    ((msr_info->data & reserved_bits) != 0 ||
	     new_state == X2APIC_ENABLE ||
	     (new_state == MSR_IA32_APICBASE_ENABLE &&
	      old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
	     (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
	      old_state == 0)))
		return 1;

	kvm_lapic_set_base(vcpu, msr_info->data);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

asmlinkage __visible void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

#define EXCPT_FAULT		0
#define EXCPT_TRAP		1
#define EXCPT_ABORT		2
#define EXCPT_INTERRUPT		3

static int exception_type(int vector)
{
	unsigned int mask;

	if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
		return EXCPT_INTERRUPT;

	mask = 1 << vector;

	/* #DB is trap, as instruction watchpoints are handled elsewhere */
	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
		return EXCPT_TRAP;

	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
		return EXCPT_ABORT;

	/* Reserved exceptions will result in fault */
	return EXCPT_FAULT;
}

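/*
 * Queue an exception, folding it into any exception that is already
 * pending: two contributory exceptions, or a non-benign exception on top
 * of a pending #PF, combine into #DF; a new exception on top of a
 * pending #DF escalates to a triple fault; anything else simply replaces
 * the pending exception.
 */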
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
		unsigned nr, bool has_error, u32 error_code,
		bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (!vcpu->arch.exception.pending) {
	queue:
		if (has_error && !is_protmode(vcpu))
			has_error = false;
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* check how the pending and the new exception combine */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace previous exception with a new one in the hope
		   that instruction re-execution will regenerate the lost
		   exception */
		goto queue;
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
	if (err)
		kvm_inject_gp(vcpu, 0);
	else
		kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = fault->address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu, fault);

	return fault->nested_page_fault;
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
	if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
		return true;

	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

/*
 * This function will be used to read from the physical memory of the currently
 * running guest. The difference to kvm_vcpu_read_guest_page is that this function
 * can read from guest physical or from the guest's guest physical memory.
 */
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t ngfn, void *data, int offset, int len,
			    u32 access)
{
	struct x86_exception exception;
	gfn_t real_gfn;
	gpa_t ngpa;

	ngpa     = gfn_to_gpa(ngfn);
	real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
	if (real_gfn == UNMAPPED_GVA)
		return -EFAULT;

	real_gfn = gpa_to_gfn(real_gfn);

	return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);

static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
			       void *data, int offset, int len, u32 access)
{
	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
				       data, offset, len, access);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
				      offset * sizeof(u64), sizeof(pdpte),
				      PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & PT_PRESENT_MASK) &&
		    (pdpte[i] &
		     vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

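/*
 * Return true if the PAE PDPTEs cached at the last CR3 load no longer
 * match guest memory.  kvm_set_cr3() uses this to decide whether a write
 * of the same CR3 value can skip the PDPTE reload.
 */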
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
	bool changed = true;
	int offset;
	gfn_t gfn;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
				       PFERR_USER_MASK | PFERR_WRITE_MASK);
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

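/*
 * Emulate a guest CR0 write: reject invalid combinations (NW without CD,
 * PG without PE, bad long-mode/PAE state), hand the value to the vendor
 * module, and reset the MMU context when paging-related bits change.
 * A non-zero return tells the caller to inject #GP.
 */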
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;

	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
						 kvm_read_cr3(vcpu)))
			return 1;
	}

	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);

	if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
	    kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
	    !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
			!vcpu->guest_xcr0_loaded) {
		/* kvm_set_xcr() also depends on this */
		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
		vcpu->guest_xcr0_loaded = 1;
	}
}

static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}

static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0 = xcr;
	u64 old_xcr0 = vcpu->arch.xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	if (!(xcr0 & XFEATURE_MASK_FP))
		return 1;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see fx_init).
	 */
	valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
	    (!(xcr0 & XFEATURE_MASK_BNDCSR)))
		return 1;

	if (xcr0 & XFEATURE_MASK_AVX512) {
		if (!(xcr0 & XFEATURE_MASK_YMM))
			return 1;
		if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
			return 1;
	}
	vcpu->arch.xcr0 = xcr0;

	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
		kvm_update_cpuid(vcpu);
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
				   X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;

	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
		return 1;

	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
		return 1;

	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
		return 1;

	if (!guest_cpuid_has_pku(vcpu) && (cr4 & X86_CR4_PKE))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has_pcid(vcpu))
			return 1;

		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	if (kvm_x86_ops->set_cr4(vcpu, cr4))
		return 1;

	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
		kvm_update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
#ifdef CONFIG_X86_64
	cr3 &= ~CR3_PCID_INVD;
#endif

	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS)
			return 1;
	} else if (is_pae(vcpu) && is_paging(vcpu) &&
		   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
		return 1;

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
	kvm_mmu_new_cr3(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (lapic_in_kernel(vcpu))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
	int i;

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
	}
}

static void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
}

static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	kvm_x86_ops->set_dr7(vcpu, dr7);
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}

static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
	u64 fixed = DR6_FIXED_1;

	if (!guest_cpuid_has_rtm(vcpu))
		fixed |= DR6_RTM;
	return fixed;
}

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
		kvm_update_dr6(vcpu);
		break;
	case 5:
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	if (__kvm_set_dr(vcpu, dr, val)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		/* fall through */
	case 6:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			*val = vcpu->arch.dr6;
		else
			*val = kvm_x86_ops->get_dr6(vcpu);
		break;
	case 5:
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	u64 data;
	int err;

	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
	if (err)
		return err;
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
	return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu. This capabilities test skips MSRs that are
 * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
 * may depend on host virtualization features rather than host cpu features.
 */

static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
	HV_X64_MSR_RESET,
	HV_X64_MSR_VP_INDEX,
	HV_X64_MSR_VP_RUNTIME,
	HV_X64_MSR_SCONTROL,
	HV_X64_MSR_STIMER0_CONFIG,
	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
	MSR_KVM_PV_EOI_EN,

	MSR_IA32_TSC_ADJUST,
	MSR_IA32_TSCDEADLINE,
	MSR_IA32_MISC_ENABLE,
	MSR_IA32_MCG_STATUS,
	MSR_IA32_MCG_CTL,
	MSR_IA32_MCG_EXT_CTL,
	MSR_IA32_SMBASE,
};

static unsigned num_emulated_msrs;

bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits)
		return false;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return false;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (!kvm_valid_efer(vcpu, efer))
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	switch (msr->index) {
	case MSR_FS_BASE:
	case MSR_GS_BASE:
	case MSR_KERNEL_GS_BASE:
	case MSR_CSTAR:
	case MSR_LSTAR:
		if (is_noncanonical_address(msr->data))
			return 1;
		break;
	case MSR_IA32_SYSENTER_EIP:
	case MSR_IA32_SYSENTER_ESP:
		/*
		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
		 * non-canonical address is written on Intel but not on
		 * AMD (which ignores the top 32-bits, because it does
		 * not implement 64-bit SYSENTER).
		 *
		 * 64-bit code should hence be able to write a non-canonical
		 * value on AMD.  Making the address canonical ensures that
		 * vmentry does not fail on Intel after writing a non-canonical
		 * value, and that something deterministic happens if the guest
		 * invokes 64-bit SYSENTER.
		 */
		msr->data = get_canonical(msr->data);
	}
	return kvm_x86_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvm_set_msr);

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;
	int r;

	msr.index = index;
	msr.host_initiated = true;
	r = kvm_get_msr(vcpu, &msr);
	if (r)
		return r;

	*data = msr.data;
	return 0;
}

static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	struct msr_data msr;

	msr.data = *data;
	msr.index = index;
	msr.host_initiated = true;
	return kvm_set_msr(vcpu, &msr);
}

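/*
 * Snapshot of the host timekeeper for the masterclock code.
 * update_pvclock_gtod() runs from the pvclock_gtod notifier chain
 * (registration is outside this excerpt), so readers can obtain a
 * consistent (boot time, TSC) pair via the seqcount without taking
 * timekeeping locks.
 */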
#ifdef CONFIG_X86_64
struct pvclock_gtod_data {
	seqcount_t	seq;

	struct { /* extract of a clocksource struct */
		int vclock_mode;
		cycle_t	cycle_last;
		cycle_t	mask;
		u32	mult;
		u32	shift;
	} clock;

	u64		boot_ns;
	u64		nsec_base;
};

static struct pvclock_gtod_data pvclock_gtod_data;

static void update_pvclock_gtod(struct timekeeper *tk)
{
	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
	u64 boot_ns;

	boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));

	write_seqcount_begin(&vdata->seq);

	/* copy pvclock gtod data */
	vdata->clock.vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
	vdata->clock.mask		= tk->tkr_mono.mask;
	vdata->clock.mult		= tk->tkr_mono.mult;
	vdata->clock.shift		= tk->tkr_mono.shift;

	vdata->boot_ns			= boot_ns;
	vdata->nsec_base		= tk->tkr_mono.xtime_nsec;

	write_seqcount_end(&vdata->seq);
}
#endif

void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
{
	/*
	 * Note: KVM_REQ_PENDING_TIMER is implicitly checked in
	 * vcpu_enter_guest.  This function is only called from
	 * the physical CPU that is running vcpu.
	 */
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
}

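/*
 * Publish the wall-clock time at a guest-specified address using the
 * pvclock versioning protocol: the version field is made odd before the
 * payload is written and even again afterwards, so the guest can detect
 * and retry around a torn update.
 */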
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec64 boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
		return;

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_guest_time_update below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime64(&boot);

	if (kvm->arch.kvmclock_offset) {
		struct timespec64 ts = ns_to_timespec64(kvm->arch.kvmclock_offset);
		boot = timespec64_sub(boot, ts);
	}
	wc.sec = (u32)boot.tv_sec; /* overflow in 2106 guest time */
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	do_shl32_div32(dividend, divisor);
	return dividend;
}

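/*
 * Compute a (shift, multiplier) pair for converting tick counts between
 * two frequencies in the pvclock fixed-point scheme.  Reading the code
 * below (not an authoritative spec), the result satisfies roughly:
 *
 *   ticks_scaled = ((ticks_base << shift) * multiplier) >> 32
 *
 * with a negative shift meaning a right shift.  E.g. for a 3 GHz TSC,
 * kvm_get_time_scale(NSEC_PER_SEC, 3000000000ULL, ...) yields factors
 * that turn TSC deltas into nanoseconds.
 */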
static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
			       s8 *pshift, u32 *pmultiplier)
{
	uint64_t scaled64;
	int32_t  shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = base_hz;
	scaled64 = scaled_hz;
	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac(scaled64, tps32);

	pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n",
		 __func__, base_hz, scaled_hz, shift, *pmultiplier);
}

#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
static unsigned long max_tsc_khz;

static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
	u64 v = (u64)khz * (1000000 + ppm);
	do_div(v, 1000000);
	return v;
}

static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	u64 ratio;

	/* Guest TSC same frequency as host TSC? */
	if (!scale) {
		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
		return 0;
	}

	/* TSC scaling supported? */
	if (!kvm_has_tsc_control) {
		if (user_tsc_khz > tsc_khz) {
			vcpu->arch.tsc_catchup = 1;
			vcpu->arch.tsc_always_catchup = 1;
			return 0;
		} else {
			WARN(1, "user requested TSC rate below hardware speed\n");
			return -1;
		}
	}

	/* TSC scaling required  - calculate ratio */
	ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
				user_tsc_khz, tsc_khz);

	if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
		WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
			  user_tsc_khz);
		return -1;
	}

	vcpu->arch.tsc_scaling_ratio = ratio;
	return 0;
}

static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{
	u32 thresh_lo, thresh_hi;
	int use_scaling = 0;

	/* tsc_khz can be zero if TSC calibration fails */
	if (user_tsc_khz == 0) {
		/* set tsc_scaling_ratio to a safe value */
		vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
		return -1;
	}

	/* Compute a scale to convert nanoseconds in TSC cycles */
	kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
			   &vcpu->arch.virtual_tsc_shift,
			   &vcpu->arch.virtual_tsc_mult);
	vcpu->arch.virtual_tsc_khz = user_tsc_khz;

	/*
	 * Compute the variation in TSC rate which is acceptable
	 * within the range of tolerance and decide if the
	 * rate being applied is within those bounds of the hardware
	 * rate.  If so, no scaling or compensation need be done.
	 */
	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
	if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
		use_scaling = 1;
	}
	return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
}

static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
				      vcpu->arch.virtual_tsc_mult,
				      vcpu->arch.virtual_tsc_shift);
	tsc += vcpu->arch.this_tsc_write;
	return tsc;
}

static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	bool vcpus_matched;
	struct kvm_arch *ka = &vcpu->kvm->arch;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			 atomic_read(&vcpu->kvm->online_vcpus));

	/*
	 * Once the masterclock is enabled, always perform request in
	 * order to update it.
	 *
	 * In order to enable masterclock, the host clocksource must be TSC
	 * and the vcpus need to have matched TSCs.  When that happens,
	 * perform request to enable masterclock.
	 */
	if (ka->use_master_clock ||
	    (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
		kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);

	trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
			    atomic_read(&vcpu->kvm->online_vcpus),
			    ka->use_master_clock, gtod->clock.vclock_mode);
#endif
}

static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
	u64 curr_offset = vcpu->arch.tsc_offset;
	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}

/*
 * Multiply tsc by a fixed point number represented by ratio.
 *
 * The most significant 64-N bits (mult) of ratio represent the
 * integral part of the fixed point number; the remaining N bits
 * (frac) represent the fractional part, ie. ratio represents a fixed
 * point number (mult + frac * 2^(-N)).
 *
 * N equals to kvm_tsc_scaling_ratio_frac_bits.
 */
static inline u64 __scale_tsc(u64 ratio, u64 tsc)
{
	return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
}

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	u64 _tsc = tsc;
	u64 ratio = vcpu->arch.tsc_scaling_ratio;

	if (ratio != kvm_default_tsc_scaling_ratio)
		_tsc = __scale_tsc(ratio, tsc);

	return _tsc;
}
EXPORT_SYMBOL_GPL(kvm_scale_tsc);

static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	u64 tsc;

	tsc = kvm_scale_tsc(vcpu, rdtsc());

	return target_tsc - tsc;
}

u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);

static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
	vcpu->arch.tsc_offset = offset;
}

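/*
 * Handle a TSC write from the guest or from userspace.  Writes that land
 * within roughly one second of virtual-cycle time of the last write to
 * this VM are treated as an attempt to synchronize TSCs and join the
 * current "generation" of matched vCPUs; anything else opens a new
 * generation (see the comments in the body for the details).
 */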
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct kvm *kvm = vcpu->kvm;
	u64 offset, ns, elapsed;
	unsigned long flags;
	s64 usdiff;
	bool matched;
	bool already_matched;
	u64 data = msr->data;

	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
	offset = kvm_compute_tsc_offset(vcpu, data);
	ns = ktime_get_boot_ns();
	elapsed = ns - kvm->arch.last_tsc_nsec;

	if (vcpu->arch.virtual_tsc_khz) {
		int faulted = 0;

		/* n.b - signed multiplication and division required */
		usdiff = data - kvm->arch.last_tsc_write;
#ifdef CONFIG_X86_64
		usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
#else
		/* do_div() only does unsigned */
		asm("1: idivl %[divisor]\n"
		    "2: xor %%edx, %%edx\n"
		    "   movl $0, %[faulted]\n"
		    "3:\n"
		    ".section .fixup,\"ax\"\n"
		    "4: movl $1, %[faulted]\n"
		    "   jmp  3b\n"
		    ".previous\n"

		_ASM_EXTABLE(1b, 4b)

		: "=A"(usdiff), [faulted] "=r" (faulted)
		: "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));

#endif
		do_div(elapsed, 1000);
		usdiff -= elapsed;
		if (usdiff < 0)
			usdiff = -usdiff;

		/* idivl overflow => difference is larger than USEC_PER_SEC */
		if (faulted)
			usdiff = USEC_PER_SEC;
	} else
		usdiff = USEC_PER_SEC; /* disable TSC match window below */

	/*
	 * Special case: TSC write with a small delta (1 second) of virtual
	 * cycle time against real time is interpreted as an attempt to
	 * synchronize the CPU.
	 *
	 * For a reliable TSC, we can match TSC offsets, and for an unstable
	 * TSC, we add elapsed time in this computation.  We could let the
	 * compensation code attempt to catch up if we fall behind, but
	 * it's better to try to match offsets from the beginning.
	 */
	if (usdiff < USEC_PER_SEC &&
	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
		if (!check_tsc_unstable()) {
			offset = kvm->arch.cur_tsc_offset;
			pr_debug("kvm: matched tsc offset for %llu\n", data);
		} else {
			u64 delta = nsec_to_cycles(vcpu, elapsed);
			data += delta;
			offset = kvm_compute_tsc_offset(vcpu, data);
			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
		}
		matched = true;
		already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
	} else {
		/*
		 * We split periods of matched TSC writes into generations.
		 * For each generation, we track the original measured
		 * nanosecond time, offset, and write, so if TSCs are in
		 * sync, we can match exact offset, and if not, we can match
		 * exact software computation in compute_guest_tsc()
		 *
		 * These values are tracked in kvm->arch.cur_xxx variables.
		 */
		kvm->arch.cur_tsc_generation++;
		kvm->arch.cur_tsc_nsec = ns;
		kvm->arch.cur_tsc_write = data;
		kvm->arch.cur_tsc_offset = offset;
		matched = false;
		pr_debug("kvm: new tsc generation %llu, clock %llu\n",
			 kvm->arch.cur_tsc_generation, data);
	}

	/*
	 * We also track the most recent recorded KHZ, write and time to
	 * allow the matching interval to be extended at each write.
	 */
	kvm->arch.last_tsc_nsec = ns;
	kvm->arch.last_tsc_write = data;
	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;

	vcpu->arch.last_guest_tsc = data;

	/* Keep track of which generation this VCPU has synchronized to */
	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;

	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
		update_ia32_tsc_adjust_msr(vcpu, offset);
	kvm_vcpu_write_tsc_offset(vcpu, offset);
	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);

	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
	if (!matched) {
		kvm->arch.nr_vcpus_matched_tsc = 0;
	} else if (!already_matched) {
		kvm->arch.nr_vcpus_matched_tsc++;
	}

	kvm_track_tsc_matching(vcpu);
	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}

EXPORT_SYMBOL_GPL(kvm_write_tsc);

static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
					   s64 adjustment)
{
	kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
}

static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
		WARN_ON(adjustment < 0);
	adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
	adjust_tsc_offset_guest(vcpu, adjustment);
}

#ifdef CONFIG_X86_64

static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)rdtsc_ordered();
	u64 last = pvclock_gtod_data.clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

static inline u64 vgettsc(cycle_t *cycle_now)
{
	long v;
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;

	*cycle_now = read_tsc();

	v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
	return v * gtod->clock.mult;
}

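/*
 * Read the host's boot-based monotonic clock and the TSC value at which
 * it was sampled, retrying around the pvclock_gtod seqcount.  The
 * returned mode tells the caller whether the host clocksource is the TSC
 * (the only mode usable for the masterclock).
 */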
static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	int mode;
	u64 ns;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ns = gtod->nsec_base;
		ns += vgettsc(cycle_now);
		ns >>= gtod->clock.shift;
		ns += gtod->boot_ns;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	*t = ns;

	return mode;
}

/* returns true if host is using tsc clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
{
	/* checked again under seqlock below */
	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
		return false;

	return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
}
#endif

/*
 *
 * Assuming a stable TSC across physical CPUs, and a stable TSC
 * across virtual CPUs, the following condition is possible.
 * Each numbered line represents an event visible to both
 * CPUs at the next numbered event.
 *
 * "timespecX" represents host monotonic time. "tscX" represents
 * RDTSC value.
 *
 * 		VCPU0 on CPU0		|	VCPU1 on CPU1
 *
 * 1.  read timespec0,tsc0
 * 2.					| timespec1 = timespec0 + N
 * 					| tsc1 = tsc0 + M
 * 3. transition to guest		| transition to guest
 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
 * 5.				        | ret1 = timespec1 + (rdtsc - tsc1)
 * 				        | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
 *
 * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
 *
 * 	- ret0 < ret1
 *	- timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
 *		...
 *	- 0 < N - M => M < N
 *
 * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
 * always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
 * when updating guest vcpus pvclock areas).
 *
 * To avoid that problem, do not allow visibility of distinct
 * system_timestamp/tsc_timestamp values simultaneously: use a master
 * copy of host monotonic time values. Update that master copy
 * in lockstep.
 *
 * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
 *
 */

static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	struct kvm_arch *ka = &kvm->arch;
	int vclock_mode;
	bool host_tsc_clocksource, vcpus_matched;

	vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
			atomic_read(&kvm->online_vcpus));

	/*
	 * If the host uses TSC clock, then passthrough TSC as stable
	 * to the guest.
	 */
	host_tsc_clocksource = kvm_get_time_and_clockread(
					&ka->master_kernel_ns,
					&ka->master_cycle_now);

	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
				&& !backwards_tsc_observed
				&& !ka->boot_vcpu_runs_old_kvmclock;

	if (ka->use_master_clock)
		atomic_set(&kvm_guest_has_master_clock, 1);

	vclock_mode = pvclock_gtod_data.clock.vclock_mode;
	trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
					vcpus_matched);
#endif
}

void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}

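/*
 * Recompute the masterclock: kick all vCPUs out of guest mode, refresh
 * the master copy of (kernel_ns, cycle_now) while no guest entries can
 * happen, then ask every vCPU to reload its kvmclock area.
 */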
static void kvm_gen_update_masterclock(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
	int i;
	struct kvm_vcpu *vcpu;
	struct kvm_arch *ka = &kvm->arch;

	spin_lock(&ka->pvclock_gtod_sync_lock);
	kvm_make_mclock_inprogress_request(kvm);
	/* no guest entries from this point */
	pvclock_update_vm_gtod_copy(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

	/* guest entries allowed */
	kvm_for_each_vcpu(i, vcpu, kvm)
		clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);

	spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif
}

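/*
 * Current kvmclock reading for the VM: derived from vCPU0's pvclock page
 * when the TSC is stable, otherwise from the host boot clock plus the
 * VM's kvmclock offset.
 */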
static u64 __get_kvmclock_ns(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
	struct kvm_arch *ka = &kvm->arch;
	s64 ns;

	if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
		u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
		ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
	} else {
		ns = ktime_get_boot_ns() + ka->kvmclock_offset;
	}

	return ns;
}

u64 get_kvmclock_ns(struct kvm *kvm)
{
	unsigned long flags;
	s64 ns;

	local_irq_save(flags);
	ns = __get_kvmclock_ns(kvm);
	local_irq_restore(flags);

	return ns;
}
1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806
static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct pvclock_vcpu_time_info guest_hv_clock;

	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
		&guest_hv_clock, sizeof(guest_hv_clock))))
		return;

	/* This VCPU is paused, but it's legal for a guest to read another
	 * VCPU's kvmclock, so we really have to follow the specification where
	 * it says that version is odd if data is being modified, and even after
	 * it is consistent.
	 *
	 * Version field updates must be kept separate.  This is because
	 * kvm_write_guest_cached might use a "rep movs" instruction, and
	 * writes within a string instruction are weakly ordered.  So there
	 * are three writes overall.
	 *
	 * As a small optimization, only write the version field in the first
	 * and third write.  The vcpu->pv_time cache is still valid, because the
	 * version field is the first in the struct.
	 */
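
	/*
	 * For illustration only (the reader lives on the guest side, not in
	 * this file): a pvclock consumer is expected to retry around odd or
	 * changing version values, roughly:
	 *
	 *	do {
	 *		version = src->version;
	 *		rmb();
	 *		... copy the time fields ...
	 *		rmb();
	 *	} while ((version & 1) || (version != src->version));
	 */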
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

	vcpu->hv_clock.version = guest_hv_clock.version + 1;
	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock.version));

	smp_wmb();

	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
	vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);

	if (vcpu->pvclock_set_guest_stopped_request) {
		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
		vcpu->pvclock_set_guest_stopped_request = false;
	}

	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);

	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock));

	smp_wmb();

	vcpu->hv_clock.version++;
	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
				&vcpu->hv_clock,
				sizeof(vcpu->hv_clock.version));
}

static int kvm_guest_time_update(struct kvm_vcpu *v)
{
	unsigned long flags, tgt_tsc_khz;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	struct kvm_arch *ka = &v->kvm->arch;
	s64 kernel_ns;
	u64 tsc_timestamp, host_tsc;
	u8 pvclock_flags;
	bool use_master_clock;

	kernel_ns = 0;
	host_tsc = 0;

	/*
	 * If the host uses the TSC clocksource, then pass the TSC through
	 * to the guest as stable.
	 */
	spin_lock(&ka->pvclock_gtod_sync_lock);
	use_master_clock = ka->use_master_clock;
	if (use_master_clock) {
		host_tsc = ka->master_cycle_now;
		kernel_ns = ka->master_kernel_ns;
	}
	spin_unlock(&ka->pvclock_gtod_sync_lock);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
	if (unlikely(tgt_tsc_khz == 0)) {
		local_irq_restore(flags);
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
		return 1;
	}
	if (!use_master_clock) {
		host_tsc = rdtsc();
		kernel_ns = ktime_get_boot_ns();
	}

	tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);

	/*
	 * We may have to catch up the TSC to match elapsed wall clock
	 * time for two reasons, even if kvmclock is used.
	 *   1) CPU could have been running below the maximum TSC rate
	 *   2) Broken TSC compensation resets the base at each VCPU
	 *      entry to avoid unknown leaps of TSC even when running
	 *      again on the same CPU.  This may cause apparent elapsed
	 *      time to disappear, and the guest to stand still or run
	 *	very slowly.
	 */
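	/*
	 * compute_guest_tsc() (defined earlier in this file) returns,
	 * roughly, the last TSC value the guest wrote plus the wall-clock
	 * nanoseconds elapsed since then, scaled to guest TSC cycles; if
	 * the actual guest TSC lags that target, the offset is widened.
	 */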
	if (vcpu->tsc_catchup) {
		u64 tsc = compute_guest_tsc(v, kernel_ns);
		if (tsc > tsc_timestamp) {
			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
			tsc_timestamp = tsc;
		}
	}

	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	if (kvm_has_tsc_control)
		tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);

	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
				   &vcpu->hv_clock.tsc_shift,
				   &vcpu->hv_clock.tsc_to_system_mul);
		vcpu->hw_tsc_khz = tgt_tsc_khz;
	}

	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
	vcpu->last_guest_tsc = tsc_timestamp;

	/* If the host uses TSC clocksource, then it is stable */
	pvclock_flags = 0;
	if (use_master_clock)
		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;

	vcpu->hv_clock.flags = pvclock_flags;

	if (vcpu->pv_time_enabled)
		kvm_setup_pvclock_page(v);
	if (v == kvm_get_vcpu(v->kvm, 0))
		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
	return 0;
}

/*
 * kvmclock updates which are isolated to a given vcpu, such as
 * vcpu->cpu migration, should not allow system_timestamp from
 * the rest of the vcpus to remain static. Otherwise ntp frequency
 * correction applies to one vcpu's system_timestamp but not
 * the others.
 *
 * So in those cases, request a kvmclock update for all vcpus.
 * We need to rate-limit these requests though, as they can
 * considerably slow guests that have a large number of vcpus.
 * The time for a remote vcpu to update its kvmclock is bound
 * by the delay we use to rate-limit the updates.
 */

#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
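/* 100ms of slack lets a burst of per-vcpu update requests coalesce into a
 * single deferred pass over all vcpus. */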

static void kvmclock_update_fn(struct work_struct *work)
{
	int i;
	struct delayed_work *dwork = to_delayed_work(work);
	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
					   kvmclock_update_work);
	struct kvm *kvm = container_of(ka, struct kvm, arch);
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}

static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
{
	struct kvm *kvm = v->kvm;

	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
	schedule_delayed_work(&kvm->arch.kvmclock_update_work,
					KVMCLOCK_UPDATE_DELAY);
}

#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
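/* 300 * HZ jiffies == 300 seconds between periodic kvmclock syncs. */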

static void kvmclock_sync_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
					   kvmclock_sync_work);
	struct kvm *kvm = container_of(ka, struct kvm, arch);

	if (!kvmclock_periodic_sync)
		return;

	schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
					KVMCLOCK_SYNC_PERIOD);
}

static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MCx_CTL(bank_num)) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* Only 0 or all 1s can be written to IA32_MCi_CTL.
			 * Some Linux kernels clear bit 10 in bank 4 to work
			 * around a BIOS/GART table-walk erratum on AMD K8s;
			 * ignore that bit so the guest does not take an
			 * uncaught #GP.
			 */
			if ((offset & 0x3) == 0 &&
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

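/*
 * A write to the (guest-chosen) Xen HVM config MSR asks us to copy page
 * 'page_num' of the userspace-supplied hypercall blob into guest memory
 * at page_addr.
 */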
static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
	if (IS_ERR(page)) {
		r = PTR_ERR(page);
		goto out;
	}
	if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
	gpa_t gpa = data & ~0x3f;

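	/* The low 6 bits of the MSR value carry control flags; the rest is
	 * the GPA of the 32-bit async-PF word mapped via the cache-init
	 * call below. */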
	/* Bits 2:5 are reserved; they should be zero. */
	if (data & 0x3c)
		return 1;

	vcpu->arch.apf.msr_val = data;

	if (!(data & KVM_ASYNC_PF_ENABLED)) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
		return 0;
	}

	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
					sizeof(u32)))
		return 1;

	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
	kvm_async_pf_wakeup_all(vcpu);
	return 0;
}

static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pv_time_enabled = false;
}

static void record_steal_time(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
		return;

	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
		return;

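	/*
	 * st.steal.version works like a seqcount: it is made odd while the
	 * fields are being updated and even again once they are consistent,
	 * so a guest reader can detect and retry around a torn update.
	 */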
	if (vcpu->arch.st.steal.version & 1)
		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */

	vcpu->arch.st.steal.version += 1;

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));

	smp_wmb();

	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
		vcpu->arch.st.last_steal;
	vcpu->arch.st.last_steal = current->sched_info.run_delay;

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));

	smp_wmb();

	vcpu->arch.st.steal.version += 1;

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	bool pr = false;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_AMD64_NB_CFG:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
	case MSR_AMD64_BU_CFG2:
		break;

	case MSR_EFER:
		return set_efer(vcpu, data);
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
		data &= ~(u64)0x8;	/* ignore TLB cache disable */
		data &= ~(u64)0x40000;  /* ignore Mc status write enable */
		if (data != 0) {
			vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
				    data);
			return 1;
		}
		break;
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
			vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
				    "0x%llx\n", data);
			return 1;
		}
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			    __func__, data);
		break;
	case 0x200 ... 0x2ff:
		return kvm_mtrr_set_msr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		return kvm_set_apic_base(vcpu, msr_info);
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
	case MSR_IA32_TSCDEADLINE:
		kvm_set_lapic_tscdeadline_msr(vcpu, data);
		break;
	case MSR_IA32_TSC_ADJUST:
		if (guest_cpuid_has_tsc_adjust(vcpu)) {
			if (!msr_info->host_initiated) {
				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
				adjust_tsc_offset_guest(vcpu, adj);
			}
			vcpu->arch.ia32_tsc_adjust_msr = data;
		}
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_IA32_SMBASE:
		if (!msr_info->host_initiated)
			return 1;
		vcpu->arch.smbase = data;
		break;
	case MSR_KVM_WALL_CLOCK_NEW:
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME_NEW:
	case MSR_KVM_SYSTEM_TIME: {
		u64 gpa_offset;
		struct kvm_arch *ka = &vcpu->kvm->arch;

		kvmclock_reset(vcpu);

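		/*
		 * Remember whether the boot vcpu programs the legacy
		 * MSR_KVM_SYSTEM_TIME; pvclock_update_vm_gtod_copy() avoids
		 * the master clock for such old-kvmclock guests.
		 */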
		if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
			bool tmp = (msr == MSR_KVM_SYSTEM_TIME);

			if (ka->boot_vcpu_runs_old_kvmclock != tmp)
				set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
					&vcpu->requests);

			ka->boot_vcpu_runs_old_kvmclock = tmp;
		}

		vcpu->arch.time = data;
		kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);

		/* nothing more to do if the enable bit is clear... */
		if (!(data & 1))
			break;

		gpa_offset = data & ~(PAGE_MASK | 1);

		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
		     &vcpu->arch.pv_time, data & ~1ULL,
		     sizeof(struct pvclock_vcpu_time_info)))
			vcpu->arch.pv_time_enabled = false;
		else
			vcpu->arch.pv_time_enabled = true;

		break;
	}
	case MSR_KVM_ASYNC_PF_EN:
		if (kvm_pv_enable_async_pf(vcpu, data))
			return 1;
		break;
	case MSR_KVM_STEAL_TIME:

		if (unlikely(!sched_info_on()))
			return 1;

		if (data & KVM_STEAL_RESERVED_MASK)
			return 1;

		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
						data & KVM_STEAL_VALID_BITS,
						sizeof(struct kvm_steal_time)))
			return 1;

		vcpu->arch.st.msr_val = data;

		if (!(data & KVM_MSR_ENABLED))
			break;

		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

		break;
	case MSR_KVM_PV_EOI_EN:
		if (kvm_lapic_enable_pv_eoi(vcpu, data))
			return 1;
		break;

	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
		return set_msr_mce(vcpu, msr, data);

	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
		pr = true; /* fall through */
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
		if (kvm_pmu_is_valid_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);

		if (pr || data != 0)
			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
				    "0x%x data 0x%llx\n", msr, data);
		break;
	case MSR_K7_CLK_CTL:
		/*
		 * Ignore all writes to this no-longer-documented MSR.
		 * Writes are only relevant for old K7 processors,
		 * all pre-dating SVM, but a recommended workaround from
		 * AMD for these chips. It is possible to specify the
		 * affected processor models on the command line, hence
		 * the need to ignore the workaround.
		 */
		break;
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
		return kvm_hv_set_msr_common(vcpu, msr, data,
					     msr_info->host_initiated);
	case MSR_IA32_BBL_CR_CTL3:
		/* Drop writes to this legacy MSR -- see rdmsr
		 * counterpart for further detail.
		 */
		vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
		break;
	case MSR_AMD64_OSVW_ID_LENGTH:
		if (!guest_cpuid_has_osvw(vcpu))
			return 1;
		vcpu->arch.osvw.length = data;
		break;
	case MSR_AMD64_OSVW_STATUS:
		if (!guest_cpuid_has_osvw(vcpu))
			return 1;
		vcpu->arch.osvw.status = data;
		break;
	default:
		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
			return xen_hvm_config(vcpu, data);
		if (kvm_pmu_is_valid_msr(vcpu, msr))
			return kvm_pmu_set_msr(vcpu, msr_info);
		if (!ignore_msrs) {
			vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
				    msr, data);
			return 1;
		} else {
			vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
				    msr, data);
			break;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an MSR value into the provided msr_data structure.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	return kvm_x86_ops->get_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvm_get_msr);

static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
		data = 0;
		break;
	case MSR_IA32_MCG_CAP:
		data = vcpu->arch.mcg_cap;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MCx_CTL(bank_num)) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			data = vcpu->arch.mce_banks[offset];
			break;
		}
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	switch (msr_info->index) {
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_K8_SYSCFG:
	case MSR_K8_TSEG_ADDR:
	case MSR_K8_TSEG_MASK:
	case MSR_K7_HWCR:
	case MSR_VM_HSAVE_PA:
	case MSR_K8_INT_PENDING_MSG:
	case MSR_AMD64_NB_CFG:
	case MSR_FAM10H_MMIO_CONF_BASE:
	case MSR_AMD64_BU_CFG2:
	case MSR_IA32_PERF_CTL:
		msr_info->data = 0;
		break;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
	case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
		msr_info->data = 0;
		break;
	case MSR_IA32_UCODE_REV:
		msr_info->data = 0x100000000ULL;
		break;
	case MSR_MTRRcap:
	case 0x200 ... 0x2ff:
		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
	case 0xcd: /* fsb frequency */
		msr_info->data = 3;
		break;
		/*
		 * MSR_EBC_FREQUENCY_ID
		 * Conservative value valid for even the basic CPU models.
		 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
		 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
		 * and 266MHz for model 3, or 4. Set Core Clock
		 * Frequency to System Bus Frequency Ratio to 1 (bits
		 * 31:24) even though these are only valid for CPU
		 * models > 2, however guests may end up dividing or
		 * multiplying by zero otherwise.
		 */
	case MSR_EBC_FREQUENCY_ID:
		msr_info->data = 1 << 24;
		break;
	case MSR_IA32_APICBASE:
		msr_info->data = kvm_get_apic_base(vcpu);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
	case MSR_IA32_TSCDEADLINE:
		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
		break;
	case MSR_IA32_TSC_ADJUST:
		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
		break;
	case MSR_IA32_MISC_ENABLE:
		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_SMBASE:
		if (!msr_info->host_initiated)
			return 1;
		msr_info->data = vcpu->arch.smbase;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		msr_info->data = 1000ULL;
		/* CPU multiplier */
		msr_info->data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		msr_info->data = vcpu->arch.efer;
		break;
	case MSR_KVM_WALL_CLOCK:
	case MSR_KVM_WALL_CLOCK_NEW:
		msr_info->data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
	case MSR_KVM_SYSTEM_TIME_NEW:
		msr_info->data = vcpu->arch.time;
		break;
	case MSR_KVM_ASYNC_PF_EN:
		msr_info->data = vcpu->arch.apf.msr_val;
		break;
	case MSR_KVM_STEAL_TIME:
		msr_info->data = vcpu->arch.st.msr_val;
		break;
	case MSR_KVM_PV_EOI_EN:
		msr_info->data = vcpu->arch.pv_eoi.msr_val;
		break;
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
		return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
	case MSR_K7_CLK_CTL:
		/*
		 * Provide expected ramp-up count for K7. All others
		 * are set to zero, indicating minimum divisors for
		 * every field.
		 *
		 * This prevents guest kernels on AMD host with CPU
		 * type 6, model 8 and higher from exploding due to
		 * the rdmsr failing.
		 */
		msr_info->data = 0x20000000;
		break;
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
		return kvm_hv_get_msr_common(vcpu,
					     msr_info->index, &msr_info->data);
	case MSR_IA32_BBL_CR_CTL3:
		/* This legacy MSR exists but isn't fully documented in current
		 * silicon.  It is however accessed by winxp in very narrow
		 * scenarios where it sets bit #19, itself documented as
		 * a "reserved" bit.  Best effort attempt to source coherent
		 * read data here should the balance of the register be
		 * interpreted by the guest:
		 *
		 * L2 cache control register 3: 64GB range, 256KB size,
		 * enabled, latency 0x1, configured
		 */
		msr_info->data = 0xbe702111;
		break;
	case MSR_AMD64_OSVW_ID_LENGTH:
		if (!guest_cpuid_has_osvw(vcpu))
			return 1;
		msr_info->data = vcpu->arch.osvw.length;
		break;
	case MSR_AMD64_OSVW_STATUS:
		if (!guest_cpuid_has_osvw(vcpu))
			return 1;
		msr_info->data = vcpu->arch.osvw.status;
		break;
	default:
		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
		if (!ignore_msrs) {
			vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index);
			return 1;
		} else {
			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
			msr_info->data = 0;
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
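/*
 * Hedged userspace-side sketch (not part of this file): the ioctl argument
 * is a struct kvm_msrs header immediately followed by nmsrs struct
 * kvm_msr_entry slots, e.g.
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } buf = {
 *		.hdr.nmsrs = 1,
 *		.e[0].index = MSR_IA32_MISC_ENABLE,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_MSRS, &buf);
 */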
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i, idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = memdup_user(user_msrs->entries, size);
	if (IS_ERR(entries)) {
		r = PTR_ERR(entries);
		goto out;
	}

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	kfree(entries);
out:
	return r;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_EXT_EMUL_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_USER_NMI:
	case KVM_CAP_REINJECT_CONTROL:
	case KVM_CAP_IRQ_INJECT_STATUS:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_IOEVENTFD_NO_LENGTH:
	case KVM_CAP_PIT2:
	case KVM_CAP_PIT_STATE2:
	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
	case KVM_CAP_XEN_HVM:
	case KVM_CAP_ADJUST_CLOCK:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_HYPERV:
	case KVM_CAP_HYPERV_VAPIC:
	case KVM_CAP_HYPERV_SPIN:
	case KVM_CAP_HYPERV_SYNIC:
	case KVM_CAP_PCI_SEGMENT:
	case KVM_CAP_DEBUGREGS:
	case KVM_CAP_X86_ROBUST_SINGLESTEP:
	case KVM_CAP_XSAVE:
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_GET_TSC_KHZ:
	case KVM_CAP_KVMCLOCK_CTRL:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_HYPERV_TIME:
	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
	case KVM_CAP_TSC_DEADLINE_TIMER:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_DISABLE_QUIRKS:
	case KVM_CAP_SET_BOOT_CPU_ID:
	case KVM_CAP_SPLIT_IRQCHIP:
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
	case KVM_CAP_ASSIGN_DEV_IRQ:
	case KVM_CAP_PCI_2_3:
#endif
		r = 1;
		break;
	case KVM_CAP_X86_SMM:
		/* SMBASE is usually relocated above 1M on modern chipsets,
		 * and SMM handlers might indeed rely on 4G segment limits,
		 * so do not report SMM to be available if real mode is
		 * emulated via vm86 mode.  Still, do not go to great lengths
		 * to avoid userspace's usage of the feature, because it is a
		 * fringe case that is not enabled except via specific settings
		 * of the module parameters.
		 */
		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_SOFT_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_PV_MMU:	/* obsolete */
		r = 0;
		break;
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
	case KVM_CAP_IOMMU:
		r = iommu_present(&pci_bus_type);
		break;
#endif
	case KVM_CAP_MCE:
		r = KVM_MAX_MCE_BANKS;
		break;
	case KVM_CAP_XCRS:
		r = boot_cpu_has(X86_FEATURE_XSAVE);
		break;
	case KVM_CAP_TSC_CONTROL:
		r = kvm_has_tsc_control;
		break;
	case KVM_CAP_X2APIC_API:
		r = KVM_X2APIC_API_VALID_FLAGS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < msr_list.nmsrs)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
				 &emulated_msrs,
				 num_emulated_msrs * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID:
	case KVM_GET_EMULATED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;

		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
					    ioctl);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_mce_cap_supported,
				 sizeof(kvm_mce_cap_supported)))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

static void wbinvd_ipi(void *garbage)
{
	wbinvd();
}

static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
	return kvm_arch_has_noncoherent_dma(vcpu->kvm);
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* The guest may execute WBINVD; emulate it if required. */
	if (need_emulate_wbinvd(vcpu)) {
		if (kvm_x86_ops->has_wbinvd_exit())
			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
			smp_call_function_single(vcpu->cpu,
					wbinvd_ipi, NULL, 1);
	}

	kvm_x86_ops->vcpu_load(vcpu, cpu);

	/* Apply any externally detected TSC adjustments (due to suspend) */
	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
		vcpu->arch.tsc_offset_adjustment = 0;
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	}

	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
				rdtsc() - vcpu->arch.last_host_tsc;
		if (tsc_delta < 0)
			mark_tsc_unstable("KVM discovered backwards TSC");

		if (check_tsc_unstable()) {
			u64 offset = kvm_compute_tsc_offset(vcpu,
						vcpu->arch.last_guest_tsc);
			kvm_vcpu_write_tsc_offset(vcpu, offset);
			vcpu->arch.tsc_catchup = 1;
		}
		if (kvm_lapic_hv_timer_in_use(vcpu) &&
				kvm_x86_ops->set_hv_timer(vcpu,
					kvm_get_lapic_target_expiration_tsc(vcpu)))
			kvm_lapic_switch_to_sw_timer(vcpu);
		/*
		 * On a host with synchronized TSC, there is no need to update
		 * kvmclock on vcpu->cpu migration
		 */
		if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
			kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
		if (vcpu->cpu != cpu)
			kvm_migrate_timers(vcpu);
		vcpu->cpu = cpu;
	}

	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
	vcpu->arch.last_host_tsc = rdtsc();
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	if (vcpu->arch.apicv_active)
		kvm_x86_ops->sync_pir_to_irr(vcpu);

	return kvm_apic_get_state(vcpu, s);
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	int r;

	r = kvm_apic_set_state(vcpu, s);
	if (r)
		return r;
	update_cr8_intercept(vcpu);

	return 0;
}

static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
{
	return (!lapic_in_kernel(vcpu) ||
		kvm_apic_accept_pic_intr(vcpu));
}

/*
 * if userspace requested an interrupt window, check that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
{
	return kvm_arch_interrupt_allowed(vcpu) &&
		!kvm_cpu_has_interrupt(vcpu) &&
		!kvm_event_needs_reinjection(vcpu) &&
		kvm_cpu_accept_dm_intr(vcpu);
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq >= KVM_NR_INTERRUPTS)
		return -EINVAL;

	if (!irqchip_in_kernel(vcpu->kvm)) {
		kvm_queue_interrupt(vcpu, irq->irq, false);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		return 0;
	}

	/*
	 * With in-kernel LAPIC, we only use this to inject EXTINT, so
	 * fail for in-kernel 8259.
	 */
	if (pic_in_kernel(vcpu->kvm))
		return -ENXIO;

	if (vcpu->arch.pending_external_vector != -1)
		return -EEXIST;

	vcpu->arch.pending_external_vector = irq->irq;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 0;
}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	kvm_inject_nmi(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_SMI, vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
					u64 mcg_cap)
{
	int r;
	unsigned bank_num = mcg_cap & 0xff, bank;
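	/* Bits 7:0 of MCG_CAP encode the number of machine-check banks. */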

	r = -EINVAL;
	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
		goto out;
	if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
		goto out;
	r = 0;
	vcpu->arch.mcg_cap = mcg_cap;
	/* Init IA32_MCG_CTL to all 1s */
	if (mcg_cap & MCG_CTL_P)
		vcpu->arch.mcg_ctl = ~(u64)0;
	/* Init IA32_MCi_CTL to all 1s */
	for (bank = 0; bank < bank_num; bank++)
		vcpu->arch.mce_banks[bank*4] = ~(u64)0;

	if (kvm_x86_ops->setup_mce)
		kvm_x86_ops->setup_mce(vcpu);
out:
	return r;
}

static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
				      struct kvm_x86_mce *mce)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;
	u64 *banks = vcpu->arch.mce_banks;

	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
		return -EINVAL;
	/*
	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
	 * reporting is disabled
	 */
	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
	    vcpu->arch.mcg_ctl != ~(u64)0)
		return 0;
	banks += 4 * mce->bank;
	/*
	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
	 * reporting is disabled for the bank
	 */
	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
		return 0;
	if (mce->status & MCI_STATUS_UC) {
		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
			return 0;
		}
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		vcpu->arch.mcg_status = mce->mcg_status;
		banks[1] = mce->status;
		kvm_queue_exception(vcpu, MC_VECTOR);
	} else if (!(banks[1] & MCI_STATUS_VAL)
		   || !(banks[1] & MCI_STATUS_UC)) {
		if (banks[1] & MCI_STATUS_VAL)
			mce->status |= MCI_STATUS_OVER;
		banks[2] = mce->addr;
		banks[3] = mce->misc;
		banks[1] = mce->status;
	} else
		banks[1] |= MCI_STATUS_OVER;
	return 0;
}

static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
					       struct kvm_vcpu_events *events)
{
	process_nmi(vcpu);
	events->exception.injected =
		vcpu->arch.exception.pending &&
		!kvm_exception_is_soft(vcpu->arch.exception.nr);
	events->exception.nr = vcpu->arch.exception.nr;
	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
	events->exception.pad = 0;
	events->exception.error_code = vcpu->arch.exception.error_code;

	events->interrupt.injected =
		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
	events->interrupt.nr = vcpu->arch.interrupt.nr;
	events->interrupt.soft = 0;
	events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);

	events->nmi.injected = vcpu->arch.nmi_injected;
	events->nmi.pending = vcpu->arch.nmi_pending != 0;
	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
	events->nmi.pad = 0;

	events->sipi_vector = 0; /* never valid when reporting to user space */

	events->smi.smm = is_smm(vcpu);
	events->smi.pending = vcpu->arch.smi_pending;
	events->smi.smm_inside_nmi =
		!!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
	events->smi.latched_init = kvm_lapic_latched_init(vcpu);

	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
			 | KVM_VCPUEVENT_VALID_SHADOW
			 | KVM_VCPUEVENT_VALID_SMM);
	memset(&events->reserved, 0, sizeof(events->reserved));
}

static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
					      struct kvm_vcpu_events *events)
{
	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
			      | KVM_VCPUEVENT_VALID_SHADOW
			      | KVM_VCPUEVENT_VALID_SMM))
		return -EINVAL;

	if (events->exception.injected &&
	    (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
		return -EINVAL;

	process_nmi(vcpu);
	vcpu->arch.exception.pending = events->exception.injected;
	vcpu->arch.exception.nr = events->exception.nr;
	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
	vcpu->arch.exception.error_code = events->exception.error_code;

	vcpu->arch.interrupt.pending = events->interrupt.injected;
	vcpu->arch.interrupt.nr = events->interrupt.nr;
	vcpu->arch.interrupt.soft = events->interrupt.soft;
	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
		kvm_x86_ops->set_interrupt_shadow(vcpu,
						  events->interrupt.shadow);

	vcpu->arch.nmi_injected = events->nmi.injected;
	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
		vcpu->arch.nmi_pending = events->nmi.pending;
	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);

	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
	    lapic_in_kernel(vcpu))
		vcpu->arch.apic->sipi_vector = events->sipi_vector;

	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
		if (events->smi.smm)
			vcpu->arch.hflags |= HF_SMM_MASK;
		else
			vcpu->arch.hflags &= ~HF_SMM_MASK;
		vcpu->arch.smi_pending = events->smi.pending;
		if (events->smi.smm_inside_nmi)
			vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
		else
			vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
		if (lapic_in_kernel(vcpu)) {
			if (events->smi.latched_init)
				set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
			else
				clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
		}
	}

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 0;
}

static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
					     struct kvm_debugregs *dbgregs)
{
	unsigned long val;

	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
	kvm_get_dr(vcpu, 6, &val);
	dbgregs->dr6 = val;
	dbgregs->dr7 = vcpu->arch.dr7;
	dbgregs->flags = 0;
	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
}

static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
					    struct kvm_debugregs *dbgregs)
{
	if (dbgregs->flags)
		return -EINVAL;

	if (dbgregs->dr6 & ~0xffffffffull)
		return -EINVAL;
	if (dbgregs->dr7 & ~0xffffffffull)
		return -EINVAL;

	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
	kvm_update_dr0123(vcpu);
	vcpu->arch.dr6 = dbgregs->dr6;
	kvm_update_dr6(vcpu);
	vcpu->arch.dr7 = dbgregs->dr7;
	kvm_update_dr7(vcpu);

	return 0;
}

#define XSTATE_COMPACTION_ENABLED (1ULL << 63)

static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
{
	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
	u64 xstate_bv = xsave->header.xfeatures;
	u64 valid;

	/*
	 * Copy legacy XSAVE area, to avoid complications with CPUID
	 * leaves 0 and 1 in the loop below.
	 */
	memcpy(dest, xsave, XSAVE_HDR_OFFSET);

	/* Set XSTATE_BV */
	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;

	/*
	 * Copy each region from the possibly compacted offset to the
	 * non-compacted offset.
	 */
	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
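	/* valid & -valid below isolates the lowest set feature bit, so the
	 * loop visits each remaining xstate feature from lowest to highest. */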
	while (valid) {
		u64 feature = valid & -valid;
		int index = fls64(feature) - 1;
		void *src = get_xsave_addr(xsave, feature);

		if (src) {
			u32 size, offset, ecx, edx;
			cpuid_count(XSTATE_CPUID, index,
				    &size, &offset, &ecx, &edx);
			memcpy(dest + offset, src, size);
		}

		valid -= feature;
	}
}

static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
{
	struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
	u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
	u64 valid;

	/*
	 * Copy legacy XSAVE area, to avoid complications with CPUID
	 * leaves 0 and 1 in the loop below.
	 */
	memcpy(xsave, src, XSAVE_HDR_OFFSET);

	/* Set XSTATE_BV and possibly XCOMP_BV.  */
	xsave->header.xfeatures = xstate_bv;
	if (boot_cpu_has(X86_FEATURE_XSAVES))
		xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;

	/*
	 * Copy each region from the non-compacted offset to the
	 * possibly compacted offset.
	 */
	valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
	while (valid) {
		u64 feature = valid & -valid;
		int index = fls64(feature) - 1;
		void *dest = get_xsave_addr(xsave, feature);

		if (dest) {
			u32 size, offset, ecx, edx;
			cpuid_count(XSTATE_CPUID, index,
				    &size, &offset, &ecx, &edx);
			memcpy(dest, src + offset, size);
		}

		valid -= feature;
	}
}

static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
					 struct kvm_xsave *guest_xsave)
{
	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
		memset(guest_xsave, 0, sizeof(struct kvm_xsave));
		fill_xsave((u8 *) guest_xsave->region, vcpu);
	} else {
		memcpy(guest_xsave->region,
			&vcpu->arch.guest_fpu.state.fxsave,
			sizeof(struct fxregs_state));
		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
			XFEATURE_MASK_FPSSE;
	}
}

static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
					struct kvm_xsave *guest_xsave)
{
	u64 xstate_bv =
		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];

	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
		/*
		 * Here we allow setting states that are not present in
		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
		 * with old userspace.
		 */
		if (xstate_bv & ~kvm_supported_xcr0())
			return -EINVAL;
		load_xsave(vcpu, (u8 *)guest_xsave->region);
	} else {
		if (xstate_bv & ~XFEATURE_MASK_FPSSE)
			return -EINVAL;
		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
			guest_xsave->region, sizeof(struct fxregs_state));
	}
	return 0;
}

static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
					struct kvm_xcrs *guest_xcrs)
{
	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
		guest_xcrs->nr_xcrs = 0;
		return;
	}

	guest_xcrs->nr_xcrs = 1;
	guest_xcrs->flags = 0;
	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
}

static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
				       struct kvm_xcrs *guest_xcrs)
{
	int i, r = 0;

	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return -EINVAL;

	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
		return -EINVAL;

	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
		/* Only support XCR0 currently */
		if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
				guest_xcrs->xcrs[i].value);
			break;
		}
	if (r)
		r = -EINVAL;
	return r;
}

/*
 * kvm_set_guest_paused() indicates to the guest kernel that it has been
 * stopped by the hypervisor.  This function will be called from the host only.
 * EINVAL is returned when the host attempts to set the flag for a guest that
 * does not support pv clocks.
 */
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pv_time_enabled)
		return -EINVAL;
	vcpu->arch.pvclock_set_guest_stopped_request = true;
	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_HYPERV_SYNIC:
		return kvm_hv_activate_synic(vcpu);
	default:
		return -EINVAL;
	}
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	union {
		struct kvm_lapic_state *lapic;
		struct kvm_xsave *xsave;
		struct kvm_xcrs *xcrs;
		void *buffer;
	} u;

	u.buffer = NULL;
	switch (ioctl) {
	case KVM_GET_LAPIC: {
		r = -EINVAL;
		if (!lapic_in_kernel(vcpu))
			goto out;
		u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

		r = -ENOMEM;
		if (!u.lapic)
			goto out;
		r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		r = -EINVAL;
		if (!lapic_in_kernel(vcpu))
			goto out;
		u.lapic = memdup_user(argp, sizeof(*u.lapic));
		if (IS_ERR(u.lapic))
			return PTR_ERR(u.lapic);

		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		break;
	}
	case KVM_SMI: {
		r = kvm_vcpu_ioctl_smi(vcpu);
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, do_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!lapic_in_kernel(vcpu))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	case KVM_X86_SETUP_MCE: {
		u64 mcg_cap;

		r = -EFAULT;
		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
			goto out;
		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
		break;
	}
	case KVM_X86_SET_MCE: {
		struct kvm_x86_mce mce;

		r = -EFAULT;
		if (copy_from_user(&mce, argp, sizeof mce))
			goto out;
		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);

		r = -EFAULT;
		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		r = -EFAULT;
		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
			break;

		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
		break;
	}
	case KVM_GET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);

		r = -EFAULT;
		if (copy_to_user(argp, &dbgregs,
				 sizeof(struct kvm_debugregs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		r = -EFAULT;
		if (copy_from_user(&dbgregs, argp,
				   sizeof(struct kvm_debugregs)))
			break;

		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
		break;
	}
	case KVM_GET_XSAVE: {
		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
		r = -ENOMEM;
		if (!u.xsave)
			break;

		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);

		r = -EFAULT;
		if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_XSAVE: {
		u.xsave = memdup_user(argp, sizeof(*u.xsave));
		if (IS_ERR(u.xsave))
			return PTR_ERR(u.xsave);

		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
		break;
	}
	case KVM_GET_XCRS: {
		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
		r = -ENOMEM;
		if (!u.xcrs)
			break;

		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);

		r = -EFAULT;
		if (copy_to_user(argp, u.xcrs,
				 sizeof(struct kvm_xcrs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_XCRS: {
		u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
		if (IS_ERR(u.xcrs))
			return PTR_ERR(u.xcrs);

		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
		break;
	}
	case KVM_SET_TSC_KHZ: {
		u32 user_tsc_khz;

		r = -EINVAL;
		user_tsc_khz = (u32)arg;

		if (user_tsc_khz >= kvm_max_guest_tsc_khz)
			goto out;

		if (user_tsc_khz == 0)
			user_tsc_khz = tsc_khz;

		if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
			r = 0;

		goto out;
	}
	case KVM_GET_TSC_KHZ: {
		r = vcpu->arch.virtual_tsc_khz;
		goto out;
	}
	case KVM_KVMCLOCK_CTRL: {
		r = kvm_set_guest_paused(vcpu);
		goto out;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	kfree(u.buffer);
	return r;
}

3577 3578 3579 3580 3581
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

3582 3583 3584 3585 3586
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
3587
		return -EINVAL;
3588 3589 3590 3591
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

3592 3593 3594 3595 3596 3597 3598
static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
					      u64 ident_addr)
{
	kvm->arch.ept_identity_map_addr = ident_addr;
	return 0;
}

3599 3600 3601 3602 3603 3604
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					  u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

3605
	mutex_lock(&kvm->slots_lock);
3606 3607

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
3608
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
3609

3610
	mutex_unlock(&kvm->slots_lock);
3611 3612 3613 3614 3615
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
3616
	return kvm->arch.n_max_mmu_pages;
3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[0],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[1],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
G
Gleb Natapov 已提交
3636
		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
3652
		spin_lock(&pic_irqchip(kvm)->lock);
3653 3654 3655
		memcpy(&pic_irqchip(kvm)->pics[0],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
3656
		spin_unlock(&pic_irqchip(kvm)->lock);
3657 3658
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
3659
		spin_lock(&pic_irqchip(kvm)->lock);
3660 3661 3662
		memcpy(&pic_irqchip(kvm)->pics[1],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
3663
		spin_unlock(&pic_irqchip(kvm)->lock);
3664 3665
		break;
	case KVM_IRQCHIP_IOAPIC:
G
Gleb Natapov 已提交
3666
		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
3667 3668 3669 3670 3671 3672 3673 3674 3675
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

3676 3677
static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
3678 3679 3680 3681 3682 3683 3684
	struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;

	BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));

	mutex_lock(&kps->lock);
	memcpy(ps, &kps->channels, sizeof(*ps));
	mutex_unlock(&kps->lock);
3685
	return 0;
3686 3687 3688 3689
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
3690
	int i;
3691 3692 3693
	struct kvm_pit *pit = kvm->arch.vpit;

	mutex_lock(&pit->pit_state.lock);
3694
	memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
3695
	for (i = 0; i < 3; i++)
3696 3697
		kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
	mutex_unlock(&pit->pit_state.lock);
3698
	return 0;
B
Beth Kon 已提交
3699 3700 3701 3702 3703 3704 3705 3706 3707
}

static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
	mutex_lock(&kvm->arch.vpit->pit_state.lock);
	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
		sizeof(ps->channels));
	ps->flags = kvm->arch.vpit->pit_state.flags;
	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3708
	memset(&ps->reserved, 0, sizeof(ps->reserved));
3709
	return 0;
B
Beth Kon 已提交
3710 3711 3712 3713
}

static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
3714
	int start = 0;
3715
	int i;
B
Beth Kon 已提交
3716
	u32 prev_legacy, cur_legacy;
3717 3718 3719 3720
	struct kvm_pit *pit = kvm->arch.vpit;

	mutex_lock(&pit->pit_state.lock);
	prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
B
Beth Kon 已提交
3721 3722 3723
	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
	if (!prev_legacy && cur_legacy)
		start = 1;
3724 3725 3726
	memcpy(&pit->pit_state.channels, &ps->channels,
	       sizeof(pit->pit_state.channels));
	pit->pit_state.flags = ps->flags;
3727
	for (i = 0; i < 3; i++)
3728
		kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
3729
				   start && i == 0);
3730
	mutex_unlock(&pit->pit_state.lock);
3731
	return 0;
3732 3733
}

3734 3735 3736
static int kvm_vm_ioctl_reinject(struct kvm *kvm,
				 struct kvm_reinject_control *control)
{
3737 3738 3739
	struct kvm_pit *pit = kvm->arch.vpit;

	if (!pit)
3740
		return -ENXIO;
3741

3742 3743 3744 3745 3746 3747 3748
	/* pit->pit_state.lock was overloaded to prevent userspace from getting
	 * an inconsistent state after running multiple KVM_REINJECT_CONTROL
	 * ioctls in parallel.  Use a separate lock if that ioctl isn't rare.
	 */
	mutex_lock(&pit->pit_state.lock);
	kvm_pit_set_reinject(pit, control->pit_reinject);
	mutex_unlock(&pit->pit_state.lock);
3749

3750 3751 3752
	return 0;
}

3753
/**
3754 3755 3756
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
3757
 *
3758 3759 3760 3761 3762 3763 3764 3765
 * Steps 1-4 below provide general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed  and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
 * writes will be marked dirty for next log read.
3766
 *
3767 3768
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
3769 3770
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
3771
 */
3772
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
3773
{
3774
	bool is_dirty = false;
3775
	int r;
3776

3777
	mutex_lock(&kvm->slots_lock);
3778

3779 3780 3781 3782 3783 3784
	/*
	 * Flush potentially hardware-cached dirty pages to dirty_bitmap.
	 */
	if (kvm_x86_ops->flush_log_dirty)
		kvm_x86_ops->flush_log_dirty(kvm);

3785
	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
3786 3787 3788 3789 3790

	/*
	 * All the TLBs can be flushed out of mmu lock, see the comments in
	 * kvm_mmu_slot_remove_write_access().
	 */
3791
	lockdep_assert_held(&kvm->slots_lock);
3792 3793 3794
	if (is_dirty)
		kvm_flush_remote_tlbs(kvm);

3795
	mutex_unlock(&kvm->slots_lock);
3796 3797 3798
	return r;
}

3799 3800
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			bool line_status)
3801 3802 3803 3804 3805
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3806 3807
					irq_event->irq, irq_event->level,
					line_status);
3808 3809 3810
	return 0;
}

3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_DISABLE_QUIRKS:
		kvm->arch.disabled_quirks = cap->args[0];
		r = 0;
		break;
3824 3825
	case KVM_CAP_SPLIT_IRQCHIP: {
		mutex_lock(&kvm->lock);
3826 3827 3828
		r = -EINVAL;
		if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
			goto split_irqchip_unlock;
3829 3830 3831
		r = -EEXIST;
		if (irqchip_in_kernel(kvm))
			goto split_irqchip_unlock;
P
Paolo Bonzini 已提交
3832
		if (kvm->created_vcpus)
3833 3834 3835 3836 3837 3838 3839
			goto split_irqchip_unlock;
		r = kvm_setup_empty_irq_routing(kvm);
		if (r)
			goto split_irqchip_unlock;
		/* Pairs with irqchip_in_kernel. */
		smp_wmb();
		kvm->arch.irqchip_split = true;
3840
		kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
3841 3842 3843 3844 3845
		r = 0;
split_irqchip_unlock:
		mutex_unlock(&kvm->lock);
		break;
	}
3846 3847 3848 3849 3850 3851 3852
	case KVM_CAP_X2APIC_API:
		r = -EINVAL;
		if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
			break;

		if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
			kvm->arch.x2apic_format = true;
3853 3854
		if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
			kvm->arch.x2apic_broadcast_quirk_disabled = true;
3855 3856 3857

		r = 0;
		break;
3858 3859 3860 3861 3862 3863 3864
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

3865 3866 3867 3868 3869
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
3870
	int r = -ENOTTY;
3871 3872 3873 3874 3875 3876 3877
	/*
	 * This union makes it completely explicit to gcc-3.x
	 * that these two variables' stack usage should be
	 * combined, not added together.
	 */
	union {
		struct kvm_pit_state ps;
B
Beth Kon 已提交
3878
		struct kvm_pit_state2 ps2;
3879
		struct kvm_pit_config pit_config;
3880
	} u;
3881 3882 3883 3884 3885

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		break;
3886 3887 3888 3889 3890 3891 3892 3893 3894
	case KVM_SET_IDENTITY_MAP_ADDR: {
		u64 ident_addr;

		r = -EFAULT;
		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
			goto out;
		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
		break;
	}
3895 3896 3897 3898 3899 3900
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
3901 3902 3903 3904 3905 3906 3907
	case KVM_CREATE_IRQCHIP: {
		struct kvm_pic *vpic;

		mutex_lock(&kvm->lock);
		r = -EEXIST;
		if (kvm->arch.vpic)
			goto create_irqchip_unlock;
3908
		r = -EINVAL;
P
Paolo Bonzini 已提交
3909
		if (kvm->created_vcpus)
3910
			goto create_irqchip_unlock;
3911
		r = -ENOMEM;
3912 3913
		vpic = kvm_create_pic(kvm);
		if (vpic) {
3914 3915
			r = kvm_ioapic_init(kvm);
			if (r) {
3916
				mutex_lock(&kvm->slots_lock);
3917
				kvm_destroy_pic(vpic);
3918
				mutex_unlock(&kvm->slots_lock);
3919
				goto create_irqchip_unlock;
3920 3921
			}
		} else
3922
			goto create_irqchip_unlock;
3923 3924
		r = kvm_setup_default_irq_routing(kvm);
		if (r) {
3925
			mutex_lock(&kvm->slots_lock);
3926
			mutex_lock(&kvm->irq_lock);
3927
			kvm_ioapic_destroy(kvm);
3928
			kvm_destroy_pic(vpic);
3929
			mutex_unlock(&kvm->irq_lock);
3930
			mutex_unlock(&kvm->slots_lock);
3931
			goto create_irqchip_unlock;
3932
		}
3933 3934 3935
		/* Write kvm->irq_routing before kvm->arch.vpic.  */
		smp_wmb();
		kvm->arch.vpic = vpic;
3936 3937
	create_irqchip_unlock:
		mutex_unlock(&kvm->lock);
3938
		break;
3939
	}
S
Sheng Yang 已提交
3940
	case KVM_CREATE_PIT:
3941 3942 3943 3944 3945 3946 3947 3948
		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
		goto create_pit;
	case KVM_CREATE_PIT2:
		r = -EFAULT;
		if (copy_from_user(&u.pit_config, argp,
				   sizeof(struct kvm_pit_config)))
			goto out;
	create_pit:
3949
		mutex_lock(&kvm->lock);
A
Avi Kivity 已提交
3950 3951 3952
		r = -EEXIST;
		if (kvm->arch.vpit)
			goto create_pit_unlock;
S
Sheng Yang 已提交
3953
		r = -ENOMEM;
3954
		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
S
Sheng Yang 已提交
3955 3956
		if (kvm->arch.vpit)
			r = 0;
A
Avi Kivity 已提交
3957
	create_pit_unlock:
3958
		mutex_unlock(&kvm->lock);
S
Sheng Yang 已提交
3959
		break;
3960 3961
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3962
		struct kvm_irqchip *chip;
3963

3964 3965 3966
		chip = memdup_user(argp, sizeof(*chip));
		if (IS_ERR(chip)) {
			r = PTR_ERR(chip);
3967
			goto out;
3968 3969
		}

3970
		r = -ENXIO;
3971
		if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
3972 3973
			goto get_irqchip_out;
		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3974
		if (r)
3975
			goto get_irqchip_out;
3976
		r = -EFAULT;
3977 3978
		if (copy_to_user(argp, chip, sizeof *chip))
			goto get_irqchip_out;
3979
		r = 0;
3980 3981
	get_irqchip_out:
		kfree(chip);
3982 3983 3984 3985
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3986
		struct kvm_irqchip *chip;
3987

3988 3989 3990
		chip = memdup_user(argp, sizeof(*chip));
		if (IS_ERR(chip)) {
			r = PTR_ERR(chip);
3991
			goto out;
3992 3993
		}

3994
		r = -ENXIO;
3995
		if (!irqchip_in_kernel(kvm) || irqchip_split(kvm))
3996 3997
			goto set_irqchip_out;
		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3998
		if (r)
3999
			goto set_irqchip_out;
4000
		r = 0;
4001 4002
	set_irqchip_out:
		kfree(chip);
4003 4004
		break;
	}
4005 4006
	case KVM_GET_PIT: {
		r = -EFAULT;
4007
		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
4008 4009 4010 4011
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
4012
		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
4013 4014 4015
		if (r)
			goto out;
		r = -EFAULT;
4016
		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
4017 4018 4019 4020 4021 4022
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		r = -EFAULT;
4023
		if (copy_from_user(&u.ps, argp, sizeof u.ps))
4024 4025 4026 4027
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
4028
		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
4029 4030
		break;
	}
B
Beth Kon 已提交
4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053
	case KVM_GET_PIT2: {
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT2: {
		r = -EFAULT;
		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
		break;
	}
4054 4055 4056 4057 4058 4059 4060 4061
	case KVM_REINJECT_CONTROL: {
		struct kvm_reinject_control control;
		r =  -EFAULT;
		if (copy_from_user(&control, argp, sizeof(control)))
			goto out;
		r = kvm_vm_ioctl_reinject(kvm, &control);
		break;
	}
4062 4063 4064
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
P
Paolo Bonzini 已提交
4065
		if (kvm->created_vcpus)
4066 4067 4068 4069 4070
			r = -EBUSY;
		else
			kvm->arch.bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
E
Ed Swierk 已提交
4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081
	case KVM_XEN_HVM_CONFIG: {
		r = -EFAULT;
		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
				   sizeof(struct kvm_xen_hvm_config)))
			goto out;
		r = -EINVAL;
		if (kvm->arch.xen_hvm_config.flags)
			goto out;
		r = 0;
		break;
	}
4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094
	case KVM_SET_CLOCK: {
		struct kvm_clock_data user_ns;
		u64 now_ns;

		r = -EFAULT;
		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
			goto out;

		r = -EINVAL;
		if (user_ns.flags)
			goto out;

		r = 0;
4095
		local_irq_disable();
4096 4097
		now_ns = __get_kvmclock_ns(kvm);
		kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
4098
		local_irq_enable();
4099
		kvm_gen_update_masterclock(kvm);
4100 4101 4102 4103 4104 4105
		break;
	}
	case KVM_GET_CLOCK: {
		struct kvm_clock_data user_ns;
		u64 now_ns;

4106 4107
		now_ns = get_kvmclock_ns(kvm);
		user_ns.clock = now_ns;
4108
		user_ns.flags = 0;
4109
		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
4110 4111 4112 4113 4114 4115 4116

		r = -EFAULT;
		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
			goto out;
		r = 0;
		break;
	}
4117 4118
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
4119

4120 4121 4122 4123 4124 4125
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
4126
	default:
4127
		r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
4128 4129 4130 4131 4132
	}
out:
	return r;
}

4133
static void kvm_init_msr_list(void)
4134 4135 4136 4137
{
	u32 dummy[2];
	unsigned i, j;

4138
	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
4139 4140
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
4141 4142 4143

		/*
		 * Even MSRs that are valid in the host may not be exposed
4144
		 * to the guests in some cases.
4145 4146 4147 4148 4149 4150
		 */
		switch (msrs_to_save[i]) {
		case MSR_IA32_BNDCFGS:
			if (!kvm_x86_ops->mpx_supported())
				continue;
			break;
4151 4152 4153 4154
		case MSR_TSC_AUX:
			if (!kvm_x86_ops->rdtscp_supported())
				continue;
			break;
4155 4156 4157 4158
		default:
			break;
		}

4159 4160 4161 4162 4163
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
4164 4165 4166

	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
		switch (emulated_msrs[i]) {
4167 4168 4169 4170
		case MSR_IA32_SMBASE:
			if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
				continue;
			break;
4171 4172 4173 4174 4175 4176 4177 4178 4179
		default:
			break;
		}

		if (j < i)
			emulated_msrs[j] = emulated_msrs[i];
		j++;
	}
	num_emulated_msrs = j;
4180 4181
}

4182 4183
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
			   const void *v)
4184
{
4185 4186 4187 4188 4189
	int handled = 0;
	int n;

	do {
		n = min(len, 8);
4190
		if (!(lapic_in_kernel(vcpu) &&
4191 4192
		      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
		    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
4193 4194 4195 4196 4197 4198
			break;
		handled += n;
		addr += n;
		len -= n;
		v += n;
	} while (len);
4199

4200
	return handled;
4201 4202
}

4203
static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
4204
{
4205 4206 4207 4208 4209
	int handled = 0;
	int n;

	do {
		n = min(len, 8);
4210
		if (!(lapic_in_kernel(vcpu) &&
4211 4212 4213
		      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
					 addr, n, v))
		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
4214 4215 4216 4217 4218 4219 4220
			break;
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
		handled += n;
		addr += n;
		len -= n;
		v += n;
	} while (len);
4221

4222
	return handled;
4223 4224
}

4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236
static void kvm_set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

4237 4238
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
			   struct x86_exception *exception)
4239 4240 4241 4242 4243 4244 4245
{
	gpa_t t_gpa;

	BUG_ON(!mmu_is_nested(vcpu));

	/* NPT walks are always user-walks */
	access |= PFERR_USER_MASK;
4246
	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
4247 4248 4249 4250

	return t_gpa;
}

4251 4252
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception)
4253 4254
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4255
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4256 4257
}

4258 4259
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
4260 4261 4262
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_FETCH_MASK;
4263
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4264 4265
}

4266 4267
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
4268 4269 4270
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_WRITE_MASK;
4271
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
4272 4273 4274
}

/* uses this to access any guest's mapped memory without checking CPL */
4275 4276
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception)
4277
{
4278
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
4279 4280 4281 4282
}

static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
				      struct kvm_vcpu *vcpu, u32 access,
4283
				      struct x86_exception *exception)
4284 4285
{
	void *data = val;
4286
	int r = X86EMUL_CONTINUE;
4287 4288

	while (bytes) {
4289
		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
4290
							    exception);
4291
		unsigned offset = addr & (PAGE_SIZE-1);
4292
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
4293 4294
		int ret;

4295
		if (gpa == UNMAPPED_GVA)
4296
			return X86EMUL_PROPAGATE_FAULT;
4297 4298
		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
					       offset, toread);
4299
		if (ret < 0) {
4300
			r = X86EMUL_IO_NEEDED;
4301 4302
			goto out;
		}
4303

4304 4305 4306
		bytes -= toread;
		data += toread;
		addr += toread;
4307
	}
4308 4309
out:
	return r;
4310
}
4311

4312
/* used for instruction fetching */
4313 4314
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
				gva_t addr, void *val, unsigned int bytes,
4315
				struct x86_exception *exception)
4316
{
4317
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4318
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4319 4320
	unsigned offset;
	int ret;
4321

4322 4323 4324 4325 4326 4327 4328 4329 4330
	/* Inline kvm_read_guest_virt_helper for speed.  */
	gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
						    exception);
	if (unlikely(gpa == UNMAPPED_GVA))
		return X86EMUL_PROPAGATE_FAULT;

	offset = addr & (PAGE_SIZE-1);
	if (WARN_ON(offset + bytes > PAGE_SIZE))
		bytes = (unsigned)PAGE_SIZE - offset;
4331 4332
	ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
				       offset, bytes);
4333 4334 4335 4336
	if (unlikely(ret < 0))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
4337 4338
}

4339
int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
4340
			       gva_t addr, void *val, unsigned int bytes,
4341
			       struct x86_exception *exception)
4342
{
4343
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4344
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
4345

4346
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
4347
					  exception);
4348
}
4349
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
4350

4351 4352
static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
				      gva_t addr, void *val, unsigned int bytes,
4353
				      struct x86_exception *exception)
4354
{
4355
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4356
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
4357 4358
}

4359 4360 4361 4362 4363 4364 4365 4366 4367
static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
		unsigned long addr, void *val, unsigned int bytes)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);

	return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
}

N
Nadav Har'El 已提交
4368
int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
4369
				       gva_t addr, void *val,
4370
				       unsigned int bytes,
4371
				       struct x86_exception *exception)
4372
{
4373
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4374 4375 4376 4377
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
4378 4379
		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
							     PFERR_WRITE_MASK,
4380
							     exception);
4381 4382 4383 4384
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

4385
		if (gpa == UNMAPPED_GVA)
4386
			return X86EMUL_PROPAGATE_FAULT;
4387
		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
4388
		if (ret < 0) {
4389
			r = X86EMUL_IO_NEEDED;
4390 4391 4392 4393 4394 4395 4396 4397 4398 4399
			goto out;
		}

		bytes -= towrite;
		data += towrite;
		addr += towrite;
	}
out:
	return r;
}
N
Nadav Har'El 已提交
4400
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
4401

4402 4403 4404 4405
static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
				gpa_t *gpa, struct x86_exception *exception,
				bool write)
{
4406 4407
	u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
		| (write ? PFERR_WRITE_MASK : 0);
4408

4409 4410 4411 4412 4413
	/*
	 * currently PKRU is only applied to ept enabled guest so
	 * there is no pkey in EPT page table for L1 guest or EPT
	 * shadow page table for L2 guest.
	 */
4414
	if (vcpu_match_mmio_gva(vcpu, gva)
F
Feng Wu 已提交
4415
	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
4416
				 vcpu->arch.access, 0, access)) {
4417 4418
		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
					(gva & (PAGE_SIZE - 1));
X
Xiao Guangrong 已提交
4419
		trace_vcpu_match_mmio(gva, *gpa, write, false);
4420 4421 4422
		return 1;
	}

4423 4424 4425 4426 4427 4428 4429 4430 4431
	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);

	if (*gpa == UNMAPPED_GVA)
		return -1;

	/* For APIC access vmexit */
	if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		return 1;

X
Xiao Guangrong 已提交
4432 4433
	if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
		trace_vcpu_match_mmio(gva, *gpa, write, true);
4434
		return 1;
X
Xiao Guangrong 已提交
4435
	}
4436

4437 4438 4439
	return 0;
}

4440
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
4441
			const void *val, int bytes)
4442 4443 4444
{
	int ret;

4445
	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
4446
	if (ret < 0)
4447
		return 0;
4448
	kvm_page_track_write(vcpu, gpa, val, bytes);
4449 4450 4451
	return 1;
}

4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467
struct read_write_emulator_ops {
	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
				  int bytes);
	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
				  void *val, int bytes);
	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
			       int bytes, void *val);
	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
				    void *val, int bytes);
	bool write;
};

static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
	if (vcpu->mmio_read_completed) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
A
Avi Kivity 已提交
4468
			       vcpu->mmio_fragments[0].gpa, *(u64 *)val);
4469 4470 4471 4472 4473 4474 4475 4476 4477 4478
		vcpu->mmio_read_completed = 0;
		return 1;
	}

	return 0;
}

static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			void *val, int bytes)
{
4479
	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503
}

static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
			 void *val, int bytes)
{
	return emulator_write_phys(vcpu, gpa, val, bytes);
}

static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{
	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
	return vcpu_mmio_write(vcpu, gpa, bytes, val);
}

static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			  void *val, int bytes)
{
	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
	return X86EMUL_IO_NEEDED;
}

static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
			   void *val, int bytes)
{
A
Avi Kivity 已提交
4504 4505
	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];

4506
	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
4507 4508 4509
	return X86EMUL_CONTINUE;
}

4510
static const struct read_write_emulator_ops read_emultor = {
4511 4512 4513 4514 4515 4516
	.read_write_prepare = read_prepare,
	.read_write_emulate = read_emulate,
	.read_write_mmio = vcpu_mmio_read,
	.read_write_exit_mmio = read_exit_mmio,
};

4517
static const struct read_write_emulator_ops write_emultor = {
4518 4519 4520 4521 4522 4523
	.read_write_emulate = write_emulate,
	.read_write_mmio = write_mmio,
	.read_write_exit_mmio = write_exit_mmio,
	.write = true,
};

4524 4525 4526 4527
static int emulator_read_write_onepage(unsigned long addr, void *val,
				       unsigned int bytes,
				       struct x86_exception *exception,
				       struct kvm_vcpu *vcpu,
4528
				       const struct read_write_emulator_ops *ops)
4529
{
4530 4531
	gpa_t gpa;
	int handled, ret;
4532
	bool write = ops->write;
A
Avi Kivity 已提交
4533
	struct kvm_mmio_fragment *frag;
4534

4535
	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
4536

4537
	if (ret < 0)
4538 4539 4540
		return X86EMUL_PROPAGATE_FAULT;

	/* For APIC access vmexit */
4541
	if (ret)
4542 4543
		goto mmio;

4544
	if (ops->read_write_emulate(vcpu, gpa, val, bytes))
4545 4546 4547 4548 4549 4550
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
4551
	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
4552
	if (handled == bytes)
4553 4554
		return X86EMUL_CONTINUE;

4555 4556 4557 4558
	gpa += handled;
	bytes -= handled;
	val += handled;

4559 4560 4561 4562 4563
	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
	frag->gpa = gpa;
	frag->data = val;
	frag->len = bytes;
A
Avi Kivity 已提交
4564
	return X86EMUL_CONTINUE;
4565 4566
}

4567 4568
static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
			unsigned long addr,
4569 4570
			void *val, unsigned int bytes,
			struct x86_exception *exception,
4571
			const struct read_write_emulator_ops *ops)
4572
{
4573
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
A
Avi Kivity 已提交
4574 4575 4576 4577 4578 4579 4580 4581
	gpa_t gpa;
	int rc;

	if (ops->read_write_prepare &&
		  ops->read_write_prepare(vcpu, val, bytes))
		return X86EMUL_CONTINUE;

	vcpu->mmio_nr_fragments = 0;
4582

4583 4584
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
A
Avi Kivity 已提交
4585
		int now;
4586 4587

		now = -addr & ~PAGE_MASK;
4588 4589 4590
		rc = emulator_read_write_onepage(addr, val, now, exception,
						 vcpu, ops);

4591 4592 4593
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
4594 4595
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			addr = (u32)addr;
4596 4597 4598
		val += now;
		bytes -= now;
	}
4599

A
Avi Kivity 已提交
4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612
	rc = emulator_read_write_onepage(addr, val, bytes, exception,
					 vcpu, ops);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (!vcpu->mmio_nr_fragments)
		return rc;

	gpa = vcpu->mmio_fragments[0].gpa;

	vcpu->mmio_needed = 1;
	vcpu->mmio_cur_fragment = 0;

4613
	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
A
Avi Kivity 已提交
4614 4615 4616 4617 4618
	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
	vcpu->run->exit_reason = KVM_EXIT_MMIO;
	vcpu->run->mmio.phys_addr = gpa;

	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630
}

static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
				  unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, val, bytes,
				   exception, &read_emultor);
}

4631
static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
4632 4633 4634 4635 4636 4637 4638
			    unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct x86_exception *exception)
{
	return emulator_read_write(ctxt, addr, (void *)val, bytes,
				   exception, &write_emultor);
4639 4640
}

4641 4642 4643 4644 4645 4646 4647
#define CMPXCHG_TYPE(t, ptr, old, new) \
	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))

#ifdef CONFIG_X86_64
#  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
#else
#  define CMPXCHG64(ptr, old, new) \
4648
	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
4649 4650
#endif

4651 4652
static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
				     unsigned long addr,
4653 4654 4655
				     const void *old,
				     const void *new,
				     unsigned int bytes,
4656
				     struct x86_exception *exception)
4657
{
4658
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4659 4660 4661 4662
	gpa_t gpa;
	struct page *page;
	char *kaddr;
	bool exchanged;
4663

4664 4665 4666
	/* guests cmpxchg8b have to be emulated atomically */
	if (bytes > 8 || (bytes & (bytes - 1)))
		goto emul_write;
4667

4668
	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
4669

4670 4671 4672
	if (gpa == UNMAPPED_GVA ||
	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto emul_write;
4673

4674 4675
	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
		goto emul_write;
4676

4677
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
4678
	if (is_error_page(page))
4679
		goto emul_write;
4680

4681
	kaddr = kmap_atomic(page);
4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697
	kaddr += offset_in_page(gpa);
	switch (bytes) {
	case 1:
		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
		break;
	case 2:
		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
		break;
	case 4:
		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
		break;
	case 8:
		exchanged = CMPXCHG64(kaddr, old, new);
		break;
	default:
		BUG();
4698
	}
4699
	kunmap_atomic(kaddr);
4700 4701 4702 4703 4704
	kvm_release_page_dirty(page);

	if (!exchanged)
		return X86EMUL_CMPXCHG_FAILED;

4705
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
4706
	kvm_page_track_write(vcpu, gpa, new, bytes);
4707 4708

	return X86EMUL_CONTINUE;
4709

4710
emul_write:
4711
	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
4712

4713
	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
4714 4715
}

4716 4717 4718 4719 4720 4721
static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
	/* TODO: String I/O for in kernel device */
	int r;

	if (vcpu->arch.pio.in)
4722
		r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
4723 4724
				    vcpu->arch.pio.size, pd);
	else
4725
		r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
4726 4727 4728 4729 4730
				     vcpu->arch.pio.port, vcpu->arch.pio.size,
				     pd);
	return r;
}

4731 4732 4733
static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
			       unsigned short port, void *val,
			       unsigned int count, bool in)
4734 4735
{
	vcpu->arch.pio.port = port;
4736
	vcpu->arch.pio.in = in;
4737
	vcpu->arch.pio.count  = count;
4738 4739 4740
	vcpu->arch.pio.size = size;

	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
4741
		vcpu->arch.pio.count = 0;
4742 4743 4744 4745
		return 1;
	}

	vcpu->run->exit_reason = KVM_EXIT_IO;
4746
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
4747 4748 4749 4750 4751 4752 4753 4754
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;

	return 0;
}

4755 4756 4757
static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
				    int size, unsigned short port, void *val,
				    unsigned int count)
4758
{
4759
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4760
	int ret;
4761

4762 4763
	if (vcpu->arch.pio.count)
		goto data_avail;
4764

4765 4766 4767 4768
	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
	if (ret) {
data_avail:
		memcpy(val, vcpu->arch.pio_data, size * count);
4769
		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
4770
		vcpu->arch.pio.count = 0;
4771 4772 4773 4774 4775 4776
		return 1;
	}

	return 0;
}

4777 4778 4779 4780 4781 4782 4783
static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
				     int size, unsigned short port,
				     const void *val, unsigned int count)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	memcpy(vcpu->arch.pio_data, val, size * count);
4784
	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
4785 4786 4787
	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
}

4788 4789 4790 4791 4792
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

4793
static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
4794
{
4795
	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
4796 4797
}

4798
int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
4799 4800 4801 4802 4803
{
	if (!need_emulate_wbinvd(vcpu))
		return X86EMUL_CONTINUE;

	if (kvm_x86_ops->has_wbinvd_exit()) {
4804 4805 4806
		int cpu = get_cpu();

		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4807 4808
		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
				wbinvd_ipi, NULL, 1);
4809
		put_cpu();
4810
		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
4811 4812
	} else
		wbinvd();
4813 4814
	return X86EMUL_CONTINUE;
}
4815 4816 4817 4818 4819 4820

int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->skip_emulated_instruction(vcpu);
	return kvm_emulate_wbinvd_noskip(vcpu);
}
4821 4822
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);

4823 4824


4825 4826
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
4827
	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
4828 4829
}

4830 4831
static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long *dest)
4832
{
4833
	return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
4834 4835
}

4836 4837
static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
			   unsigned long value)
4838
{
4839

4840
	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
4841 4842
}

4843
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4844
{
4845
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
4846 4847
}

4848
static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
4849
{
4850
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4851 4852 4853 4854 4855 4856 4857 4858 4859 4860
	unsigned long value;

	switch (cr) {
	case 0:
		value = kvm_read_cr0(vcpu);
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
4861
		value = kvm_read_cr3(vcpu);
4862 4863 4864 4865 4866 4867 4868 4869
		break;
	case 4:
		value = kvm_read_cr4(vcpu);
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
4870
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
4871 4872 4873 4874 4875 4876
		return 0;
	}

	return value;
}

4877
static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
4878
{
4879
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4880 4881
	int res = 0;

4882 4883
	switch (cr) {
	case 0:
4884
		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4885 4886 4887 4888 4889
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
4890
		res = kvm_set_cr3(vcpu, val);
4891 4892
		break;
	case 4:
4893
		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4894 4895
		break;
	case 8:
A
Andre Przywara 已提交
4896
		res = kvm_set_cr8(vcpu, val);
4897 4898
		break;
	default:
4899
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
4900
		res = -1;
4901
	}
4902 4903

	return res;
4904 4905
}

4906
static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
4907
{
4908
	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
4909 4910
}

4911
static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4912
{
4913
	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
4914 4915
}

4916
static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4917
{
4918
	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
4919 4920
}

4921 4922 4923 4924 4925 4926 4927 4928 4929 4930
static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
}

4931 4932
static unsigned long emulator_get_cached_segment_base(
	struct x86_emulate_ctxt *ctxt, int seg)
4933
{
4934
	return get_segment_base(emul_to_vcpu(ctxt), seg);
4935 4936
}

4937 4938 4939
static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
				 struct desc_struct *desc, u32 *base3,
				 int seg)
4940 4941 4942
{
	struct kvm_segment var;

4943
	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
4944
	*selector = var.selector;
4945

4946 4947
	if (var.unusable) {
		memset(desc, 0, sizeof(*desc));
4948
		return false;
4949
	}
4950 4951 4952 4953 4954

	if (var.g)
		var.limit >>= 12;
	set_desc_limit(desc, var.limit);
	set_desc_base(desc, (unsigned long)var.base);
4955 4956 4957 4958
#ifdef CONFIG_X86_64
	if (base3)
		*base3 = var.base >> 32;
#endif
4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970
	desc->type = var.type;
	desc->s = var.s;
	desc->dpl = var.dpl;
	desc->p = var.present;
	desc->avl = var.avl;
	desc->l = var.l;
	desc->d = var.db;
	desc->g = var.g;

	return true;
}

4971 4972 4973
static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
				 struct desc_struct *desc, u32 base3,
				 int seg)
4974
{
4975
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4976 4977
	struct kvm_segment var;

4978
	var.selector = selector;
4979
	var.base = get_desc_base(desc);
4980 4981 4982
#ifdef CONFIG_X86_64
	var.base |= ((u64)base3) << 32;
#endif
4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000
	var.limit = get_desc_limit(desc);
	if (desc->g)
		var.limit = (var.limit << 12) | 0xfff;
	var.type = desc->type;
	var.dpl = desc->dpl;
	var.db = desc->d;
	var.s = desc->s;
	var.l = desc->l;
	var.g = desc->g;
	var.avl = desc->avl;
	var.present = desc->p;
	var.unusable = !var.present;
	var.padding = 0;

	kvm_set_segment(vcpu, &var, seg);
	return;
}

5001 5002 5003
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 *pdata)
{
5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014
	struct msr_data msr;
	int r;

	msr.index = msr_index;
	msr.host_initiated = false;
	r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
	if (r)
		return r;

	*pdata = msr.data;
	return 0;
5015 5016 5017 5018 5019
}

static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
			    u32 msr_index, u64 data)
{
5020 5021 5022 5023 5024 5025
	struct msr_data msr;

	msr.data = data;
	msr.index = msr_index;
	msr.host_initiated = false;
	return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
5026 5027
}

P
Paolo Bonzini 已提交
5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041
static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	return vcpu->arch.smbase;
}

static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);

	vcpu->arch.smbase = smbase;
}

5042 5043 5044
static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
			      u32 pmc)
{
5045
	return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc);
5046 5047
}

5048 5049 5050
static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
			     u32 pmc, u64 *pdata)
{
5051
	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
5052 5053
}

5054 5055 5056 5057 5058
static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{
	emul_to_vcpu(ctxt)->arch.halt_request = 1;
}

5059 5060 5061
static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
{
	preempt_disable();
5062
	kvm_load_guest_fpu(emul_to_vcpu(ctxt));
5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074
	/*
	 * CR0.TS may reference the host fpu state, not the guest fpu state,
	 * so it may be clear at this point.
	 */
	clts();
}

static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
{
	preempt_enable();
}

5075
static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
5076
			      struct x86_instruction_info *info,
5077 5078
			      enum x86_intercept_stage stage)
{
5079
	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
5080 5081
}

5082
static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
5083 5084
			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
5085
	kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
5086 5087
}

5088 5089 5090 5091 5092 5093 5094 5095 5096 5097
static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
	return kvm_register_read(emul_to_vcpu(ctxt), reg);
}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
}

5098 5099 5100 5101 5102
static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
{
	kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
}

5103
static const struct x86_emulate_ops emulate_ops = {
5104 5105
	.read_gpr            = emulator_read_gpr,
	.write_gpr           = emulator_write_gpr,
5106
	.read_std            = kvm_read_guest_virt_system,
5107
	.write_std           = kvm_write_guest_virt_system,
5108
	.read_phys           = kvm_read_guest_phys_system,
5109
	.fetch               = kvm_fetch_guest_virt,
5110 5111 5112
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
5113
	.invlpg              = emulator_invlpg,
5114 5115
	.pio_in_emulated     = emulator_pio_in_emulated,
	.pio_out_emulated    = emulator_pio_out_emulated,
5116 5117
	.get_segment         = emulator_get_segment,
	.set_segment         = emulator_set_segment,
5118
	.get_cached_segment_base = emulator_get_cached_segment_base,
5119
	.get_gdt             = emulator_get_gdt,
5120
	.get_idt	     = emulator_get_idt,
5121 5122
	.set_gdt             = emulator_set_gdt,
	.set_idt	     = emulator_set_idt,
5123 5124
	.get_cr              = emulator_get_cr,
	.set_cr              = emulator_set_cr,
5125
	.cpl                 = emulator_get_cpl,
5126 5127
	.get_dr              = emulator_get_dr,
	.set_dr              = emulator_set_dr,
P
Paolo Bonzini 已提交
5128 5129
	.get_smbase          = emulator_get_smbase,
	.set_smbase          = emulator_set_smbase,
5130 5131
	.set_msr             = emulator_set_msr,
	.get_msr             = emulator_get_msr,
5132
	.check_pmc	     = emulator_check_pmc,
5133
	.read_pmc            = emulator_read_pmc,
5134
	.halt                = emulator_halt,
5135
	.wbinvd              = emulator_wbinvd,
5136
	.fix_hypercall       = emulator_fix_hypercall,
5137 5138
	.get_fpu             = emulator_get_fpu,
	.put_fpu             = emulator_put_fpu,
5139
	.intercept           = emulator_intercept,
5140
	.get_cpuid           = emulator_get_cpuid,
5141
	.set_nmi_mask        = emulator_set_nmi_mask,
5142 5143
};

5144 5145
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
5146
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
5147 5148 5149 5150 5151 5152 5153
	/*
	 * an sti; sti; sequence only disable interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss
	 */
5154 5155
	if (int_shadow & mask)
		mask = 0;
5156
	if (unlikely(int_shadow || mask)) {
5157
		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
5158 5159 5160
		if (!mask)
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	}
5161 5162
}

5163
static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
5164 5165
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5166
	if (ctxt->exception.vector == PF_VECTOR)
5167 5168 5169
		return kvm_propagate_fault(vcpu, &ctxt->exception);

	if (ctxt->exception.error_code_valid)
5170 5171
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
5172
	else
5173
		kvm_queue_exception(vcpu, ctxt->exception.vector);
5174
	return false;
5175 5176
}

5177 5178
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
5179
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5180 5181 5182 5183
	int cs_db, cs_l;

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

5184 5185 5186 5187
	ctxt->eflags = kvm_get_rflags(vcpu);
	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
5188
		     (cs_l && is_long_mode(vcpu))	? X86EMUL_MODE_PROT64 :
5189 5190
		     cs_db				? X86EMUL_MODE_PROT32 :
							  X86EMUL_MODE_PROT16;
5191
	BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
P
Paolo Bonzini 已提交
5192 5193
	BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
	BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
5194
	ctxt->emul_flags = vcpu->arch.hflags;
5195

5196
	init_decode_cache(ctxt);
5197
	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
5198 5199
}

5200
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
5201
{
5202
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5203 5204 5205 5206
	int ret;

	init_emulate_ctxt(vcpu);

5207 5208 5209
	ctxt->op_bytes = 2;
	ctxt->ad_bytes = 2;
	ctxt->_eip = ctxt->eip + inc_eip;
5210
	ret = emulate_int_real(ctxt, irq);
5211 5212 5213 5214

	if (ret != X86EMUL_CONTINUE)
		return EMULATE_FAIL;

5215
	ctxt->eip = ctxt->_eip;
5216 5217
	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);
5218 5219

	if (irq == NMI_VECTOR)
A
Avi Kivity 已提交
5220
		vcpu->arch.nmi_pending = 0;
5221 5222 5223 5224 5225 5226 5227
	else
		vcpu->arch.interrupt.pending = false;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);

5228 5229
static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
5230 5231
	int r = EMULATE_DONE;

5232 5233
	++vcpu->stat.insn_emulation_fail;
	trace_kvm_emulate_insn_failed(vcpu);
5234
	if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
5235 5236 5237 5238 5239
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		r = EMULATE_FAIL;
	}
5240
	kvm_queue_exception(vcpu, UD_VECTOR);
5241 5242

	return r;
5243 5244
}

5245
static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
5246 5247
				  bool write_fault_to_shadow_pgtable,
				  int emulation_type)
5248
{
5249
	gpa_t gpa = cr2;
D
Dan Williams 已提交
5250
	kvm_pfn_t pfn;
5251

5252 5253 5254
	if (emulation_type & EMULTYPE_NO_REEXECUTE)
		return false;

5255 5256 5257 5258 5259 5260
	if (!vcpu->arch.mmu.direct_map) {
		/*
		 * Write permission should be allowed since only
		 * write access need to be emulated.
		 */
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
5261

5262 5263 5264 5265 5266 5267 5268
		/*
		 * If the mapping is invalid in guest, let cpu retry
		 * it to generate fault.
		 */
		if (gpa == UNMAPPED_GVA)
			return true;
	}
5269

5270 5271 5272 5273 5274 5275 5276
	/*
	 * Do not retry the unhandleable instruction if it faults on the
	 * readonly host memory, otherwise it will goto a infinite loop:
	 * retry instruction -> write #PF -> emulation fail -> retry
	 * instruction -> ...
	 */
	pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297

	/*
	 * If the instruction failed on the error pfn, it can not be fixed,
	 * report the error to userspace.
	 */
	if (is_error_noslot_pfn(pfn))
		return false;

	kvm_release_pfn_clean(pfn);

	/* The instructions are well-emulated on direct mmu. */
	if (vcpu->arch.mmu.direct_map) {
		unsigned int indirect_shadow_pages;

		spin_lock(&vcpu->kvm->mmu_lock);
		indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
		spin_unlock(&vcpu->kvm->mmu_lock);

		if (indirect_shadow_pages)
			kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

5298
		return true;
5299
	}
5300

5301 5302 5303 5304 5305 5306
	/*
	 * if emulation was due to access to shadowed page table
	 * and it failed try to unshadow page and re-enter the
	 * guest to let CPU execute the instruction.
	 */
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5307 5308 5309 5310 5311 5312 5313

	/*
	 * If the access faults on its page table, it can not
	 * be fixed by unprotecting shadow page and it should
	 * be reported to userspace.
	 */
	return !write_fault_to_shadow_pgtable;
5314 5315
}

5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354
static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
			      unsigned long cr2,  int emulation_type)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;

	last_retry_eip = vcpu->arch.last_retry_eip;
	last_retry_addr = vcpu->arch.last_retry_addr;

	/*
	 * If the emulation is caused by #PF and it is non-page_table
	 * writing instruction, it means the VM-EXIT is caused by shadow
	 * page protected, we can zap the shadow page and retry this
	 * instruction directly.
	 *
	 * Note: if the guest uses a non-page-table modifying instruction
	 * on the PDE that points to the instruction, then we will unmap
	 * the instruction and go to an infinite loop. So, we cache the
	 * last retried eip and the last fault address, if we meet the eip
	 * and the address again, we can break out of the potential infinite
	 * loop.
	 */
	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;

	if (!(emulation_type & EMULTYPE_RETRY))
		return false;

	if (x86_page_table_writing_insn(ctxt))
		return false;

	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
		return false;

	vcpu->arch.last_retry_eip = ctxt->eip;
	vcpu->arch.last_retry_addr = cr2;

	if (!vcpu->arch.mmu.direct_map)
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

5355
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
5356 5357 5358 5359

	return true;
}

5360 5361 5362
static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);

P
Paolo Bonzini 已提交
5363
static void kvm_smm_changed(struct kvm_vcpu *vcpu)
5364
{
P
Paolo Bonzini 已提交
5365
	if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
5366 5367 5368
		/* This is a good place to trace that we are exiting SMM.  */
		trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);

5369 5370
		/* Process a latched INIT or SMI, if any.  */
		kvm_make_request(KVM_REQ_EVENT, vcpu);
P
Paolo Bonzini 已提交
5371
	}
5372 5373

	kvm_mmu_reset_context(vcpu);
P
Paolo Bonzini 已提交
5374 5375 5376 5377 5378 5379
}

static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags)
{
	unsigned changed = vcpu->arch.hflags ^ emul_flags;

5380
	vcpu->arch.hflags = emul_flags;
P
Paolo Bonzini 已提交
5381 5382 5383

	if (changed & HF_SMM_MASK)
		kvm_smm_changed(vcpu);
5384 5385
}

5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400
static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
				unsigned long *db)
{
	u32 dr6 = 0;
	int i;
	u32 enable, rwlen;

	enable = dr7;
	rwlen = dr7 >> 16;
	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
			dr6 |= (1 << i);
	return dr6;
}

static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
{
	struct kvm_run *kvm_run = vcpu->run;

	/*
	 * rflags is the old, "raw" value of the flags.  The new value has
	 * not been saved yet.
	 *
	 * This is correct even for TF set by the guest, because "the
	 * processor will not generate this exception after the instruction
	 * that sets the TF flag".
	 */
	if (unlikely(rflags & X86_EFLAGS_TF)) {
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
						  DR6_RTM;
			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = EMULATE_USER_EXIT;
		} else {
			vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
			/*
			 * "Certain debug exceptions may clear bits 0-3.  The
			 * remaining contents of the DR6 register are never
			 * cleared by the processor".
			 */
			vcpu->arch.dr6 &= ~15;
			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
			kvm_queue_exception(vcpu, DB_VECTOR);
		}
	}
}

static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
{
	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
	    (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
		struct kvm_run *kvm_run = vcpu->run;
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					   vcpu->arch.guest_debug_dr7,
					   vcpu->arch.eff_db);

		if (dr6 != 0) {
			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
			kvm_run->debug.arch.pc = eip;
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = EMULATE_USER_EXIT;
			return true;
		}
	}

	if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
	    !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
		unsigned long eip = kvm_get_linear_rip(vcpu);
		u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
					   vcpu->arch.dr7,
					   vcpu->arch.db);

		if (dr6 != 0) {
			vcpu->arch.dr6 &= ~15;
			vcpu->arch.dr6 |= dr6 | DR6_RTM;
			kvm_queue_exception(vcpu, DB_VECTOR);
			*r = EMULATE_DONE;
			return true;
		}
	}

	return false;
}

int x86_emulate_instruction(struct kvm_vcpu *vcpu,
			    unsigned long cr2,
			    int emulation_type,
			    void *insn,
			    int insn_len)
{
	int r;
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	bool writeback = true;
	bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;

	/*
	 * Clear write_fault_to_shadow_pgtable here to ensure it is
	 * never reused.
	 */
	vcpu->arch.write_fault_to_shadow_pgtable = false;
	kvm_clear_exception_queue(vcpu);

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		init_emulate_ctxt(vcpu);

		/*
		 * We will reenter on the same instruction since
		 * we do not set complete_userspace_io.  This does not
		 * handle watchpoints yet, those would be handled in
		 * the emulate_ops.
		 */
		if (kvm_vcpu_check_breakpoint(vcpu, &r))
			return r;

		ctxt->interruptibility = 0;
		ctxt->have_exception = false;
		ctxt->exception.vector = -1;
		ctxt->perm_ok = false;

		ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;

		r = x86_decode_insn(ctxt, insn, insn_len);

		trace_kvm_emulate_insn_start(vcpu);
		++vcpu->stat.insn_emulation;
		if (r != EMULATION_OK) {
			if (emulation_type & EMULTYPE_TRAP_UD)
				return EMULATE_FAIL;
			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
						emulation_type))
				return EMULATE_DONE;
			if (emulation_type & EMULTYPE_SKIP)
				return EMULATE_FAIL;
			return handle_emulation_failure(vcpu);
		}
	}

	if (emulation_type & EMULTYPE_SKIP) {
		kvm_rip_write(vcpu, ctxt->_eip);
		if (ctxt->eflags & X86_EFLAGS_RF)
			kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
		return EMULATE_DONE;
	}

	if (retry_instruction(ctxt, cr2, emulation_type))
		return EMULATE_DONE;

	/* this is needed for vmware backdoor interface to work since it
	   changes register values during IO operation */
	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
		emulator_invalidate_register_cache(ctxt);
	}

restart:
	r = x86_emulate_insn(ctxt);

	if (r == EMULATION_INTERCEPTED)
		return EMULATE_DONE;

	if (r == EMULATION_FAILED) {
		if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
					emulation_type))
			return EMULATE_DONE;

		return handle_emulation_failure(vcpu);
	}

	if (ctxt->have_exception) {
		r = EMULATE_DONE;
		if (inject_emulated_exception(vcpu))
			return r;
	} else if (vcpu->arch.pio.count) {
		if (!vcpu->arch.pio.in) {
			/* FIXME: return into emulator if single-stepping.  */
			vcpu->arch.pio.count = 0;
		} else {
			writeback = false;
			vcpu->arch.complete_userspace_io = complete_emulated_pio;
		}
		r = EMULATE_USER_EXIT;
	} else if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			writeback = false;
		r = EMULATE_USER_EXIT;
		vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	} else if (r == EMULATION_RESTART)
		goto restart;
	else
		r = EMULATE_DONE;

	if (writeback) {
		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
		toggle_interruptibility(vcpu, ctxt->interruptibility);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
		if (vcpu->arch.hflags != ctxt->emul_flags)
			kvm_set_hflags(vcpu, ctxt->emul_flags);
		kvm_rip_write(vcpu, ctxt->eip);
		if (r == EMULATE_DONE)
			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
		if (!ctxt->have_exception ||
		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
			__kvm_set_rflags(vcpu, ctxt->eflags);

		/*
		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
		 * do nothing, and it will be requested again as soon as
		 * the shadow expires.  But we still need to check here,
		 * because POPF has no interrupt shadow.
		 */
		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
			kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else
		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;

	return r;
}
EXPORT_SYMBOL_GPL(x86_emulate_instruction);
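
/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * typical caller reaches this path through the emulate_instruction()
 * wrapper, e.g. from a vendor module's #UD exit handler:
 *
 *	if (emulate_instruction(vcpu, EMULTYPE_TRAP_UD) != EMULATE_DONE)
 *		kvm_queue_exception(vcpu, UD_VECTOR);
 *
 * EMULATE_USER_EXIT means the run loop must return to userspace (pending
 * PIO or MMIO is completed there) before the emulator is re-entered via
 * complete_userspace_io.
 */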

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
{
	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
					    size, port, &val, 1);
	/* do not return to emulator after return from userspace */
	vcpu->arch.pio.count = 0;
	return ret;
}
EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
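
/*
 * Usage note (an assumption about the callers, not original text): the
 * vendor exit handlers can use this fast path for a simple one-count OUT
 * decoded from the exit information, e.g. kvm_fast_pio_out(vcpu, size,
 * port), falling back to full instruction emulation for string I/O.
 */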

static int kvmclock_cpu_down_prep(unsigned int cpu)
{
	__this_cpu_write(cpu_tsc_khz, 0);
	return 0;
}

static void tsc_khz_changed(void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long khz = 0;

	if (data)
		khz = freq->new;
	else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		khz = cpufreq_quick_get(raw_smp_processor_id());
	if (!khz)
		khz = tsc_khz;
	__this_cpu_write(cpu_tsc_khz, khz);
}

static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i, send_ipi = 0;

	/*
	 * We allow guests to temporarily run on slowing clocks,
	 * provided we notify them after, or to run on accelerating
	 * clocks, provided we notify them before.  Thus time never
	 * goes backwards.
	 *
	 * However, we have a problem.  We can't atomically update
	 * the frequency of a given CPU from this function; it is
	 * merely a notifier, which can be called from any CPU.
	 * Changing the TSC frequency at arbitrary points in time
	 * requires a recomputation of local variables related to
	 * the TSC for each VCPU.  We must flag these local variables
	 * to be updated and be sure the update takes place with the
	 * new frequency before any guests proceed.
	 *
	 * Unfortunately, the combination of hotplug CPU and frequency
	 * change creates an intractable locking scenario; the order
	 * of when these callouts happen is undefined with respect to
	 * CPU hotplug, and they can race with each other.  As such,
	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
	 * undefined; you can actually have a CPU frequency change take
	 * place in between the computation of X and the setting of the
	 * variable.  To protect against this problem, all updates of
	 * the per_cpu tsc_khz variable are done in an interrupt
	 * protected IPI, and all callers wishing to update the value
	 * must wait for a synchronous IPI to complete (which is trivial
	 * if the caller is on the CPU already).  This establishes the
	 * necessary total order on variable updates.
	 *
	 * Note that because a guest time update may take place
	 * anytime after the setting of the VCPU's request bit, the
	 * correct TSC value must be set before the request.  However,
	 * to ensure the update actually makes it to any guest which
	 * starts running in hardware virtualization between the set
	 * and the acquisition of the spinlock, we must also ping the
	 * CPU after setting the request bit.
	 *
	 */

	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
		return 0;
	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
		return 0;

	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->cpu != freq->cpu)
				continue;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
			if (vcpu->cpu != smp_processor_id())
				send_ipi = 1;
		}
	}
	spin_unlock(&kvm_lock);

	if (freq->old < freq->new && send_ipi) {
		/*
		 * We upscale the frequency.  We must make sure the guest
		 * doesn't see old kvmclock values while running with
		 * the new frequency; otherwise we risk the guest seeing
		 * time go backwards.
		 *
		 * In case we update the frequency for another cpu
		 * (which might be in guest context) send an interrupt
		 * to kick the cpu out of guest context.  Next time
		 * guest context is entered kvmclock will be updated,
		 * so the guest will not see stale values.
		 */
		smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
	}
	return 0;
}
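
/*
 * Worked example (illustrative, not from the original source): on a
 * CPUFREQ_PRECHANGE notification with freq->old = 2000000 (kHz) and
 * freq->new = 1000000, freq->old > freq->new holds, so the notifier
 * returns early and the per-cpu value is refreshed only at
 * CPUFREQ_POSTCHANGE; a slowing clock is published after the fact.  An
 * accelerating clock takes the mirror-image path and is published at
 * PRECHANGE, so kvmclock never appears to go backwards.
 */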

static struct notifier_block kvmclock_cpufreq_notifier_block = {
	.notifier_call  = kvmclock_cpufreq_notifier
};

static int kvmclock_cpu_online(unsigned int cpu)
{
	tsc_khz_changed(NULL);
	return 0;
}

static void kvm_timer_init(void)
{
	max_tsc_khz = tsc_khz;

	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
		struct cpufreq_policy policy;
		int cpu;

		memset(&policy, 0, sizeof(policy));
		cpu = get_cpu();
		cpufreq_get_policy(&policy, cpu);
		if (policy.cpuinfo.max_freq)
			max_tsc_khz = policy.cpuinfo.max_freq;
		put_cpu();
#endif
		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
					  CPUFREQ_TRANSITION_NOTIFIER);
	}
	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);

	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE",
			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
}

static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);

int kvm_is_in_guest(void)
{
	return __this_cpu_read(current_vcpu) != NULL;
}

static int kvm_is_user_mode(void)
{
	int user_mode = 3;

	if (__this_cpu_read(current_vcpu))
		user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));

	return user_mode != 0;
}

static unsigned long kvm_get_guest_ip(void)
{
	unsigned long ip = 0;

	if (__this_cpu_read(current_vcpu))
		ip = kvm_rip_read(__this_cpu_read(current_vcpu));

	return ip;
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.is_in_guest		= kvm_is_in_guest,
	.is_user_mode		= kvm_is_user_mode,
	.get_guest_ip		= kvm_get_guest_ip,
};

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);

void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);


static void kvm_set_mmio_spte_mask(void)
{
	u64 mask;
	int maxphyaddr = boot_cpu_data.x86_phys_bits;

	/*
	 * Set the reserved bits and the present bit of a paging-structure
	 * entry to generate a page fault with PFER.RSV = 1.
	 */
	/* Mask the reserved physical address bits. */
	mask = rsvd_bits(maxphyaddr, 51);

	/* Bit 62 is always reserved for 32-bit host. */
	mask |= 0x3ull << 62;

	/* Set the present bit. */
	mask |= 1ull;

#ifdef CONFIG_X86_64
	/*
	 * If reserved bit is not supported, clear the present bit to disable
	 * mmio page fault.
	 */
	if (maxphyaddr == 52)
		mask &= ~1ull;
#endif

	kvm_mmu_set_mmio_spte_mask(mask);
}
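
/*
 * Worked example (illustrative, not from the original source): with
 * maxphyaddr == 40, rsvd_bits(40, 51) yields 0x000fff0000000000;
 * OR-ing in bits 62-63 (0xc000000000000000) and the present bit gives a
 * final MMIO SPTE mask of 0xc00fff0000000001.
 */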

#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
	struct kvm *kvm;

	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
	atomic_set(&kvm_guest_has_master_clock, 0);
	spin_unlock(&kvm_lock);
}

static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);

/*
 * Notification about pvclock gtod data update.
 */
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
			       void *priv)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	struct timekeeper *tk = priv;

	update_pvclock_gtod(tk);

	/* disable master clock if host does not trust, or does not
	 * use, TSC clocksource
	 */
	if (gtod->clock.vclock_mode != VCLOCK_TSC &&
	    atomic_read(&kvm_guest_has_master_clock) != 0)
		queue_work(system_long_wq, &pvclock_gtod_work);

	return 0;
}

static struct notifier_block pvclock_gtod_notifier = {
	.notifier_call = pvclock_gtod_notify,
};
#endif

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_x86_ops *ops = opaque;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		r = -EEXIST;
		goto out;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	r = -ENOMEM;
	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
	if (!shared_msrs) {
		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
		goto out;
	}

	r = kvm_mmu_module_init();
	if (r)
		goto out_free_percpu;

	kvm_set_mmio_spte_mask();

	kvm_x86_ops = ops;

	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0,
			PT_PRESENT_MASK);
	kvm_timer_init();

	perf_register_guest_info_callbacks(&kvm_guest_cbs);

	if (boot_cpu_has(X86_FEATURE_XSAVE))
		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	kvm_lapic_init();
#ifdef CONFIG_X86_64
	pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
#endif

	return 0;

out_free_percpu:
	free_percpu(shared_msrs);
out:
	return r;
}

void kvm_arch_exit(void)
{
	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);

	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);
	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
#ifdef CONFIG_X86_64
	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
#endif
	kvm_x86_ops = NULL;
	kvm_mmu_module_exit();
	free_percpu(shared_msrs);
}

int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (lapic_in_kernel(vcpu)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_vcpu_halt);

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->skip_emulated_instruction(vcpu);
	return kvm_vcpu_halt(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

/*
 * kvm_pv_kick_cpu_op:  Kick a vcpu.
 *
 * @apicid - apicid of vcpu to be kicked.
 */
static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
{
	struct kvm_lapic_irq lapic_irq;

	lapic_irq.shorthand = 0;
	lapic_irq.dest_mode = 0;
	lapic_irq.dest_id = apicid;
	lapic_irq.msi_redir_hint = false;

	lapic_irq.delivery_mode = APIC_DM_REMRD;
	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
}

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
{
	vcpu->arch.apicv_active = false;
	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
}

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
	int op_64_bit, r = 1;

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	if (kvm_hv_hypercall_enabled(vcpu->kvm))
		return kvm_hv_hypercall(vcpu);

	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);

	trace_kvm_hypercall(nr, a0, a1, a2, a3);

	op_64_bit = is_64_bit_mode(vcpu);
	if (!op_64_bit) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
		ret = -KVM_EPERM;
		goto out;
	}

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_KICK_CPU:
		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
		ret = 0;
		break;
	default:
		ret = -KVM_ENOSYS;
		break;
	}
out:
	if (!op_64_bit)
		ret = (u32)ret;
	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	++vcpu->stat.hypercalls;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
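
/*
 * Guest-side sketch (an assumption for illustration, not original text):
 * a guest issues these hypercalls with the number in RAX and arguments
 * in RBX/RCX/RDX/RSI, e.g. kicking the vcpu whose APIC ID is in "apicid":
 *
 *	asm volatile("vmcall"
 *		     : "=a" (ret)
 *		     : "a" (KVM_HC_KICK_CPU), "b" (0), "c" (apicid)
 *		     : "memory");
 *
 * On AMD the instruction is vmmcall; emulator_fix_hypercall() below
 * patches the vendor-appropriate byte sequence into the guest.
 */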

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	char instruction[3];
	unsigned long rip = kvm_rip_read(vcpu);

	kvm_x86_ops->patch_hypercall(vcpu, instruction);

	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}

static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
	return vcpu->run->request_interrupt_window &&
		likely(!pic_in_kernel(vcpu->kvm));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	kvm_run->ready_for_interrupt_injection =
		pic_in_kernel(vcpu->kvm) ||
		kvm_vcpu_ready_for_interrupt_injection(vcpu);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!kvm_x86_ops->update_cr8_intercept)
		return;

	if (!lapic_in_kernel(vcpu))
		return;

	if (vcpu->arch.apicv_active)
		return;

	if (!vcpu->arch.apic->vapic_addr)
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	else
		max_irr = -1;

	if (max_irr != -1)
		max_irr >>= 4;

	tpr = kvm_lapic_get_cr8(vcpu);

	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}

static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
{
	int r;

	/* try to reinject previous events if any */
	if (vcpu->arch.exception.pending) {
		trace_kvm_inj_exception(vcpu->arch.exception.nr,
					vcpu->arch.exception.has_error_code,
					vcpu->arch.exception.error_code);

		if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
					     X86_EFLAGS_RF);

		if (vcpu->arch.exception.nr == DB_VECTOR &&
		    (vcpu->arch.dr7 & DR7_GD)) {
			vcpu->arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(vcpu);
		}

		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
					  vcpu->arch.exception.has_error_code,
					  vcpu->arch.exception.error_code,
					  vcpu->arch.exception.reinject);
		return 0;
	}

	if (vcpu->arch.nmi_injected) {
		kvm_x86_ops->set_nmi(vcpu);
		return 0;
	}

	if (vcpu->arch.interrupt.pending) {
		kvm_x86_ops->set_irq(vcpu);
		return 0;
	}

	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
		r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
		if (r != 0)
			return r;
	}

	/* try to inject new event if pending */
	if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
		vcpu->arch.smi_pending = false;
		enter_smm(vcpu);
	} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
		--vcpu->arch.nmi_pending;
		vcpu->arch.nmi_injected = true;
		kvm_x86_ops->set_nmi(vcpu);
	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
		/*
		 * Because interrupts can be injected asynchronously, we are
		 * calling check_nested_events again here to avoid a race condition.
		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
		 * proposal and current concerns.  Perhaps we should be setting
		 * KVM_REQ_EVENT only on certain events and not unconditionally?
		 */
		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
			if (r != 0)
				return r;
		}
		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
					    false);
			kvm_x86_ops->set_irq(vcpu);
		}
	}

	return 0;
}

static void process_nmi(struct kvm_vcpu *vcpu)
{
	unsigned limit = 2;

	/*
	 * x86 is limited to one NMI running, and one NMI pending after it.
	 * If an NMI is already in progress, limit further NMIs to just one.
	 * Otherwise, allow two (and we'll inject the first one immediately).
	 */
	if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
		limit = 1;

	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
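
/*
 * Worked example (illustrative, not from the original source): if three
 * NMIs were queued while none was in service, nmi_queued is 3 and the
 * limit stays 2, so nmi_pending becomes min(0 + 3, 2) = 2: one NMI is
 * injected right away and exactly one more stays latched, matching the
 * architectural "one running, one pending" rule described above.
 */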

#define put_smstate(type, buf, offset, val)			  \
	*(type *)((buf) + (offset) - 0x7e00) = val
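
/*
 * Illustrative example (not from the original source): "buf" holds the
 * 512-byte state-save area that enter_smm() writes at smbase + 0xfe00,
 * so offsets are rebased by 0x7e00; e.g. put_smstate(u32, buf, 0x7ffc,
 * val) stores val at buf + 0x1fc.
 */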

static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
{
	u32 flags = 0;
	flags |= seg->g       << 23;
	flags |= seg->db      << 22;
	flags |= seg->l       << 21;
	flags |= seg->avl     << 20;
	flags |= seg->present << 15;
	flags |= seg->dpl     << 13;
	flags |= seg->s       << 12;
	flags |= seg->type    << 8;
	return flags;
}
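
/*
 * Worked example (illustrative, not from the original source): a flat
 * present 32-bit code segment (g=1, db=1, l=0, avl=0, present=1, dpl=0,
 * s=1, type=0xb) packs to 0x00c09b00, i.e. the descriptor access rights
 * shifted left by 8.
 */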

static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
{
	struct kvm_segment seg;
	int offset;

	kvm_get_segment(vcpu, &seg, n);
	put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	put_smstate(u32, buf, offset + 8, seg.base);
	put_smstate(u32, buf, offset + 4, seg.limit);
	put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
}

#ifdef CONFIG_X86_64
static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
{
	struct kvm_segment seg;
	int offset;
	u16 flags;

	kvm_get_segment(vcpu, &seg, n);
	offset = 0x7e00 + n * 16;

	flags = enter_smm_get_segment_flags(&seg) >> 8;
	put_smstate(u16, buf, offset, seg.selector);
	put_smstate(u16, buf, offset + 2, flags);
	put_smstate(u32, buf, offset + 4, seg.limit);
	put_smstate(u64, buf, offset + 8, seg.base);
}
#endif

static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
{
	struct desc_ptr dt;
	struct kvm_segment seg;
	unsigned long val;
	int i;

	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
	put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
	put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));

	for (i = 0; i < 8; i++)
		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));

	kvm_get_dr(vcpu, 6, &val);
	put_smstate(u32, buf, 0x7fcc, (u32)val);
	kvm_get_dr(vcpu, 7, &val);
	put_smstate(u32, buf, 0x7fc8, (u32)val);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
	put_smstate(u32, buf, 0x7fc4, seg.selector);
	put_smstate(u32, buf, 0x7f64, seg.base);
	put_smstate(u32, buf, 0x7f60, seg.limit);
	put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));

	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
	put_smstate(u32, buf, 0x7fc0, seg.selector);
	put_smstate(u32, buf, 0x7f80, seg.base);
	put_smstate(u32, buf, 0x7f7c, seg.limit);
	put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));

	kvm_x86_ops->get_gdt(vcpu, &dt);
	put_smstate(u32, buf, 0x7f74, dt.address);
	put_smstate(u32, buf, 0x7f70, dt.size);

	kvm_x86_ops->get_idt(vcpu, &dt);
	put_smstate(u32, buf, 0x7f58, dt.address);
	put_smstate(u32, buf, 0x7f54, dt.size);

	for (i = 0; i < 6; i++)
		enter_smm_save_seg_32(vcpu, buf, i);

	put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));

	/* revision id */
	put_smstate(u32, buf, 0x7efc, 0x00020000);
	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
}

static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
{
#ifdef CONFIG_X86_64
	struct desc_ptr dt;
	struct kvm_segment seg;
	unsigned long val;
	int i;

	for (i = 0; i < 16; i++)
		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));

	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));

	kvm_get_dr(vcpu, 6, &val);
	put_smstate(u64, buf, 0x7f68, val);
	kvm_get_dr(vcpu, 7, &val);
	put_smstate(u64, buf, 0x7f60, val);

	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));

	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);

	/* revision id */
	put_smstate(u32, buf, 0x7efc, 0x00020064);

	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
	put_smstate(u16, buf, 0x7e90, seg.selector);
	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
	put_smstate(u32, buf, 0x7e94, seg.limit);
	put_smstate(u64, buf, 0x7e98, seg.base);

	kvm_x86_ops->get_idt(vcpu, &dt);
	put_smstate(u32, buf, 0x7e84, dt.size);
	put_smstate(u64, buf, 0x7e88, dt.address);

	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
	put_smstate(u16, buf, 0x7e70, seg.selector);
	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
	put_smstate(u32, buf, 0x7e74, seg.limit);
	put_smstate(u64, buf, 0x7e78, seg.base);

	kvm_x86_ops->get_gdt(vcpu, &dt);
	put_smstate(u32, buf, 0x7e64, dt.size);
	put_smstate(u64, buf, 0x7e68, dt.address);

	for (i = 0; i < 6; i++)
		enter_smm_save_seg_64(vcpu, buf, i);
#else
	WARN_ON_ONCE(1);
#endif
}

static void enter_smm(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs, ds;
	struct desc_ptr dt;
	char buf[512];
	u32 cr0;

	trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
	vcpu->arch.hflags |= HF_SMM_MASK;
	memset(buf, 0, 512);
	if (guest_cpuid_has_longmode(vcpu))
		enter_smm_save_state_64(vcpu, buf);
	else
		enter_smm_save_state_32(vcpu, buf);

	kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));

	if (kvm_x86_ops->get_nmi_mask(vcpu))
		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
	else
		kvm_x86_ops->set_nmi_mask(vcpu, true);

	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
	kvm_rip_write(vcpu, 0x8000);

	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_x86_ops->set_cr4(vcpu, 0);

	/* Undocumented: IDT limit is set to zero on entry to SMM.  */
	dt.address = dt.size = 0;
	kvm_x86_ops->set_idt(vcpu, &dt);

	__kvm_set_dr(vcpu, 7, DR7_FIXED_1);

	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
	cs.base = vcpu->arch.smbase;

	ds.selector = 0;
	ds.base = 0;

	cs.limit    = ds.limit = 0xffffffff;
	cs.type     = ds.type = 0x3;
	cs.dpl      = ds.dpl = 0;
	cs.db       = ds.db = 0;
	cs.s        = ds.s = 1;
	cs.l        = ds.l = 0;
	cs.g        = ds.g = 1;
	cs.avl      = ds.avl = 0;
	cs.present  = ds.present = 1;
	cs.unusable = ds.unusable = 0;
	cs.padding  = ds.padding = 0;

	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);

	if (guest_cpuid_has_longmode(vcpu))
		kvm_x86_ops->set_efer(vcpu, 0);

	kvm_update_cpuid(vcpu);
	kvm_mmu_reset_context(vcpu);
}

static void process_smi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.smi_pending = true;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}

void kvm_make_scan_ioapic_request(struct kvm *kvm)
{
	kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
	u64 eoi_exit_bitmap[4];

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		return;

	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);

	if (irqchip_split(vcpu->kvm))
		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
	else {
		if (vcpu->arch.apicv_active)
			kvm_x86_ops->sync_pir_to_irr(vcpu);
		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
	}
	bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
		  vcpu_to_synic(vcpu)->vec_bitmap, 256);
	kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}

static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
	struct page *page = NULL;

	if (!lapic_in_kernel(vcpu))
		return;

	if (!kvm_x86_ops->set_apic_access_page_addr)
		return;

	page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (is_error_page(page))
		return;
	kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));

	/*
	 * Do not pin apic access page in memory, the MMU notifier
	 * will call us again if it is migrated or swapped out.
	 */
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);

void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
					   unsigned long address)
{
	/*
	 * The physical address of apic access page is stored in the VMCS.
	 * Update it when it becomes invalid.
	 */
	if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
}

/*
 * Returns 1 to let vcpu_run() continue the guest execution loop without
 * exiting to the userspace.  Otherwise, the value will be returned to the
 * userspace.
 */
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	int r;
	bool req_int_win =
		dm_request_for_irq_injection(vcpu) &&
		kvm_cpu_accept_dm_intr(vcpu);

	bool req_immediate_exit = false;

	if (vcpu->requests) {
		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
			kvm_mmu_unload(vcpu);
		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
			__kvm_migrate_timers(vcpu);
		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
			kvm_gen_update_masterclock(vcpu->kvm);
		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
			kvm_gen_kvmclock_update(vcpu);
		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
			r = kvm_guest_time_update(vcpu);
			if (unlikely(r))
				goto out;
		}
		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
			kvm_mmu_sync_roots(vcpu);
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			kvm_vcpu_flush_tlb(vcpu);
		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
			vcpu->fpu_active = 0;
			kvm_x86_ops->fpu_deactivate(vcpu);
		}
		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
			/* Page is swapped out. Do synthetic halt */
			vcpu->arch.apf.halted = true;
			r = 1;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
			record_steal_time(vcpu);
		if (kvm_check_request(KVM_REQ_SMI, vcpu))
			process_smi(vcpu);
		if (kvm_check_request(KVM_REQ_NMI, vcpu))
			process_nmi(vcpu);
		if (kvm_check_request(KVM_REQ_PMU, vcpu))
			kvm_pmu_handle_event(vcpu);
		if (kvm_check_request(KVM_REQ_PMI, vcpu))
			kvm_pmu_deliver_pmi(vcpu);
		if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
			BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
			if (test_bit(vcpu->arch.pending_ioapic_eoi,
				     vcpu->arch.ioapic_handled_vectors)) {
				vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
				vcpu->run->eoi.vector =
						vcpu->arch.pending_ioapic_eoi;
				r = 0;
				goto out;
			}
		}
		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
			vcpu_scan_ioapic(vcpu);
		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
			kvm_vcpu_reload_apic_access_page(vcpu);
		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
			r = 0;
			goto out;
		}
		if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
			vcpu->run->exit_reason = KVM_EXIT_HYPERV;
			vcpu->run->hyperv = vcpu->arch.hyperv.exit;
			r = 0;
			goto out;
		}

		/*
		 * KVM_REQ_HV_STIMER has to be processed after
		 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
		 * depend on the guest clock being up-to-date
		 */
		if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
			kvm_hv_process_stimers(vcpu);
	}

	/*
	 * KVM_REQ_EVENT is not set when posted interrupts are set by
	 * VT-d hardware, so we have to update RVI unconditionally.
	 */
	if (kvm_lapic_enabled(vcpu)) {
		/*
		 * Update architecture specific hints for APIC
		 * virtual interrupt delivery.
		 */
		if (vcpu->arch.apicv_active)
			kvm_x86_ops->hwapic_irr_update(vcpu,
				kvm_lapic_find_highest_irr(vcpu));
	}

	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
		kvm_apic_accept_events(vcpu);
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			r = 1;
			goto out;
		}

		if (inject_pending_event(vcpu, req_int_win) != 0)
			req_immediate_exit = true;
		else {
			/* Enable NMI/IRQ window open exits if needed.
			 *
			 * SMIs have two cases: 1) they can be nested, and
			 * then there is nothing to do here because RSM will
			 * cause a vmexit anyway; 2) or the SMI can be pending
			 * because inject_pending_event has completed the
			 * injection of an IRQ or NMI from the previous vmexit,
			 * and then we request an immediate exit to inject the SMI.
			 */
			if (vcpu->arch.smi_pending && !is_smm(vcpu))
				req_immediate_exit = true;
			if (vcpu->arch.nmi_pending)
				kvm_x86_ops->enable_nmi_window(vcpu);
			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
				kvm_x86_ops->enable_irq_window(vcpu);
		}

		if (kvm_lapic_enabled(vcpu)) {
			update_cr8_intercept(vcpu);
			kvm_lapic_sync_to_vapic(vcpu);
		}
	}

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r)) {
		goto cancel_injection;
	}

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	if (vcpu->fpu_active)
		kvm_load_guest_fpu(vcpu);
	vcpu->mode = IN_GUEST_MODE;

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	/*
	 * We should set ->mode before checking ->requests.
	 * Please see the comment in kvm_make_all_cpus_request.
	 * This also orders the write to mode from any reads
	 * to the page tables done while the VCPU is running.
	 * Please see the comment in kvm_flush_remote_tlbs.
	 */
	smp_mb__after_srcu_read_unlock();

	local_irq_disable();

	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
	    || need_resched() || signal_pending(current)) {
		vcpu->mode = OUTSIDE_GUEST_MODE;
		smp_wmb();
		local_irq_enable();
		preempt_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = 1;
		goto cancel_injection;
	}

	kvm_load_guest_xcr0(vcpu);

	if (req_immediate_exit) {
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		smp_send_reschedule(vcpu->cpu);
	}

	trace_kvm_entry(vcpu->vcpu_id);
	wait_lapic_expire(vcpu);
	guest_enter_irqoff();

	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
		set_debugreg(vcpu->arch.dr6, 6);
		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
	}

	kvm_x86_ops->run(vcpu);

	/*
	 * Do this here before restoring debug registers on the host.  And
	 * since we do this before handling the vmexit, a DR access vmexit
	 * can (a) read the correct value of the debug registers, (b) set
	 * KVM_DEBUGREG_WONT_EXIT again.
	 */
	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
		kvm_x86_ops->sync_dirty_debug_regs(vcpu);
		kvm_update_dr0123(vcpu);
		kvm_update_dr6(vcpu);
		kvm_update_dr7(vcpu);
		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
	}

	/*
	 * If the guest has used debug registers, at least dr7
	 * will be disabled while returning to the host.
	 * If we don't have active breakpoints in the host, we don't
	 * care about the messed up debug address registers. But if
	 * we have some of them active, restore the old state.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();

	vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());

	vcpu->mode = OUTSIDE_GUEST_MODE;
	smp_wmb();

	kvm_put_guest_xcr0(vcpu);

	kvm_x86_ops->handle_external_intr(vcpu);

	++vcpu->stat.exits;

	guest_exit_irqoff();

	local_irq_enable();
	preempt_enable();

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
	}

	if (unlikely(vcpu->arch.tsc_always_catchup))
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

	if (vcpu->arch.apic_attention)
		kvm_lapic_sync_from_vapic(vcpu);

	r = kvm_x86_ops->handle_exit(vcpu);
	return r;

cancel_injection:
	kvm_x86_ops->cancel_injection(vcpu);
	if (unlikely(vcpu->arch.apic_attention))
		kvm_lapic_sync_from_vapic(vcpu);
out:
	return r;
}

static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu) &&
	    (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
		kvm_vcpu_block(vcpu);
		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);

		if (kvm_x86_ops->post_block)
			kvm_x86_ops->post_block(vcpu);

		if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
			return 1;
	}

	kvm_apic_accept_events(vcpu);
	switch(vcpu->arch.mp_state) {
	case KVM_MP_STATE_HALTED:
		vcpu->arch.pv.pv_unhalted = false;
		vcpu->arch.mp_state =
			KVM_MP_STATE_RUNNABLE;
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.apf.halted = false;
		break;
	case KVM_MP_STATE_INIT_RECEIVED:
		break;
	default:
		return -EINTR;
		break;
	}
	return 1;
}

static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		!vcpu->arch.apf.halted);
}

static int vcpu_run(struct kvm_vcpu *vcpu)
{
	int r;
	struct kvm *kvm = vcpu->kvm;

	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);

	for (;;) {
		if (kvm_vcpu_running(vcpu)) {
			r = vcpu_enter_guest(vcpu);
		} else {
			r = vcpu_block(kvm, vcpu);
		}

		if (r <= 0)
			break;

		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

		if (dm_request_for_irq_injection(vcpu) &&
			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
			r = 0;
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
			++vcpu->stat.request_irq_exits;
			break;
		}

		kvm_check_async_pf_completion(vcpu);

		if (signal_pending(current)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
			break;
		}
		if (need_resched()) {
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
			cond_resched();
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
		}
	}

	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);

	return r;
}

static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
	int r;
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (r != EMULATE_DONE)
		return 0;
	return 1;
}

static int complete_emulated_pio(struct kvm_vcpu *vcpu)
{
	BUG_ON(!vcpu->arch.pio.count);

	return complete_emulated_io(vcpu);
}

/*
 * Implements the following, as a state machine:
 *
 * read:
 *   for each fragment
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       exit
 *       copy data
 *   execute insn
 *
 * write:
 *   for each fragment
 *     for each mmio piece in the fragment
 *       write gpa, len
 *       copy data
 *       exit
 */
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_mmio_fragment *frag;
	unsigned len;

	BUG_ON(!vcpu->mmio_needed);

	/* Complete previous fragment */
	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
	len = min(8u, frag->len);
	if (!vcpu->mmio_is_write)
		memcpy(frag->data, run->mmio.data, len);

	if (frag->len <= 8) {
		/* Switch to the next fragment. */
		frag++;
		vcpu->mmio_cur_fragment++;
	} else {
		/* Go forward to the next mmio piece. */
		frag->data += len;
		frag->gpa += len;
		frag->len -= len;
	}

	if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
		vcpu->mmio_needed = 0;

		/* FIXME: return into emulator if single-stepping.  */
		if (vcpu->mmio_is_write)
			return 1;
		vcpu->mmio_read_completed = 1;
		return complete_emulated_io(vcpu);
	}

	run->exit_reason = KVM_EXIT_MMIO;
	run->mmio.phys_addr = frag->gpa;
	if (vcpu->mmio_is_write)
		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
	run->mmio.len = min(8u, frag->len);
	run->mmio.is_write = vcpu->mmio_is_write;
	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
	return 0;
}
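
/*
 * Worked example (illustrative, not from the original source): a 16-byte
 * emulated read that lands in one fragment is completed in two passes.
 * Pass one copies 8 bytes into frag->data, advances data/gpa by 8 and
 * shrinks frag->len to 8; pass two copies the remaining 8 bytes and
 * switches fragments, and once mmio_cur_fragment reaches
 * mmio_nr_fragments the emulator is re-entered via complete_emulated_io().
 */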

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct fpu *fpu = &current->thread.fpu;
	int r;
	sigset_t sigsaved;

	fpu__activate_curr(fpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		kvm_apic_accept_events(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	/* re-sync apic's tpr */
	if (!lapic_in_kernel(vcpu)) {
		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
			r = -EINVAL;
			goto out;
		}
	}

	if (unlikely(vcpu->arch.complete_userspace_io)) {
		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
		vcpu->arch.complete_userspace_io = NULL;
		r = cui(vcpu);
		if (r <= 0)
			goto out;
	} else
		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);

	r = vcpu_run(vcpu);

out:
	post_kvm_run_save(vcpu);
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
		/*
		 * We are here if userspace calls get_regs() in the middle of
		 * instruction emulation. Register state needs to be copied
		 * back from the emulation context to the vcpu. Userspace
		 * shouldn't usually do that, but some badly designed PV
		 * devices (vmware backdoor interface) need this to work
		 */
		emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
	}
	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif

	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_get_rflags(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;

	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_set_rflags(vcpu, regs->rflags);

	vcpu->arch.exception.pending = false;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 0;
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct desc_ptr dt;

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.size;
	sregs->idt.base = dt.address;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.size;
	sregs->gdt.base = dt.address;

	sregs->cr0 = kvm_read_cr0(vcpu);
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = kvm_read_cr3(vcpu);
	sregs->cr4 = kvm_read_cr4(vcpu);
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);

	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
		set_bit(vcpu->arch.interrupt.nr,
			(unsigned long *)sregs->interrupt_bitmap);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	kvm_apic_accept_events(vcpu);
	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
					vcpu->arch.pv.pv_unhalted)
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
	else
		mp_state->mp_state = vcpu->arch.mp_state;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (!lapic_in_kernel(vcpu) &&
	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
		return -EINVAL;

	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
	} else
		vcpu->arch.mp_state = mp_state->mp_state;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 0;
}

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int ret;

	init_emulate_ctxt(vcpu);

	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
				   has_error_code, error_code);

	if (ret)
		return EMULATE_FAIL;

	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct msr_data apic_base_msr;
	int mmu_reset_needed = 0;
	int pending_vec, max_bits, idx;
	struct desc_ptr dt;

	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
		return -EINVAL;

	dt.size = sregs->idt.limit;
	dt.address = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.size = sregs->gdt.limit;
	dt.address = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	apic_base_msr.data = sregs->apic_base;
	apic_base_msr.host_initiated = true;
	kvm_set_apic_base(vcpu, &apic_base_msr);

	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
		kvm_update_cpuid(vcpu);

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		mmu_reset_needed = 1;
	}
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	max_bits = KVM_NR_INTERRUPTS;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	update_cr8_intercept(vcpu);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !is_protmode(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	unsigned long rflags;
	int i, r;

	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (vcpu->arch.exception.pending)
			goto out;
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

	/*
	 * Read rflags as long as potentially injected trace flags are still
	 * filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
	kvm_update_dr7(vcpu);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
			get_segment_base(vcpu, VCPU_SREG_CS);

	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);

	kvm_x86_ops->update_bp_intercept(vcpu);

	r = 0;

out:

	return r;
}
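
/*
 * Illustrative sketch only, not part of this file: userspace reaches the
 * handler above via the KVM_SET_GUEST_DEBUG vCPU ioctl, e.g. to single-step
 * the guest.  vcpu_fd is assumed to be an open vCPU file descriptor.
 */
#if 0
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int vmm_enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
#endif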

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;

	return 0;
}
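
/*
 * Illustrative sketch only, not part of this file: the handler above backs
 * the KVM_TRANSLATE vCPU ioctl, which a VMM or debugger can use to resolve
 * a guest-virtual address.  vcpu_fd is assumed to be an open vCPU descriptor.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vmm_translate_gva(int vcpu_fd, __u64 gva, __u64 *gpa)
{
	struct kvm_translation tr = { .linear_address = gva };

	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) < 0 || !tr.valid)
		return -1;
	*gpa = tr.physical_address;
	return 0;
}
#endif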

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxregs_state *fxsave =
			&vcpu->arch.guest_fpu.state.fxsave;

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxregs_state *fxsave =
			&vcpu->arch.guest_fpu.state.fxsave;

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	return 0;
}
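
/*
 * Illustrative sketch only, not part of this file: the pair above services
 * the KVM_GET_FPU/KVM_SET_FPU ioctls, round-tripping the guest FXSAVE image
 * through struct kvm_fpu.  vcpu_fd is assumed to be an open vCPU descriptor.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vmm_reset_x87_control(int vcpu_fd)
{
	struct kvm_fpu fpu;

	if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) < 0)
		return -1;
	fpu.fcw = 0x37f;	/* x86 power-on FPU control word */
	fpu.fsw = 0;		/* clear status word (exceptions, TOS) */
	return ioctl(vcpu_fd, KVM_SET_FPU, &fpu);
}
#endif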

static void fx_init(struct kvm_vcpu *vcpu)
{
	fpstate_init(&vcpu->arch.guest_fpu.state);
	if (boot_cpu_has(X86_FEATURE_XSAVES))
		vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
			host_xcr0 | XSTATE_COMPACTION_ENABLED;

	/*
	 * Ensure guest xcr0 is valid for loading
	 */
	vcpu->arch.xcr0 = XFEATURE_MASK_FP;

	vcpu->arch.cr0 |= X86_CR0_ET;
}

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_fpu_loaded)
		return;

	/*
	 * Restore all possible states in the guest,
	 * and assume host would use all available bits.
	 * Guest xcr0 would be loaded later.
	 */
	vcpu->guest_fpu_loaded = 1;
	__kernel_fpu_begin();
	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
	trace_kvm_fpu(1);
}

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded) {
		vcpu->fpu_counter = 0;
		return;
	}

	vcpu->guest_fpu_loaded = 0;
	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
	__kernel_fpu_end();
	++vcpu->stat.fpu_reload;
	/*
	 * If using eager FPU mode, or if the guest is a frequent user
	 * of the FPU, just leave the FPU active for next time.
	 * Every 255 times fpu_counter rolls over to 0; a guest that uses
	 * the FPU in bursts will revert to loading it on demand.
	 */
	if (!use_eager_fpu()) {
		if (++vcpu->fpu_counter < 5)
			kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
	}
	trace_kvm_fpu(0);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;

	kvmclock_reset(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
	free_cpumask_var(wbinvd_dirty_mask);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
						unsigned int id)
{
	struct kvm_vcpu *vcpu;

	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
		printk_once(KERN_WARNING
		"kvm: SMP vm created on host with unstable TSC; "
		"guest TSC will not be reliable\n");

	vcpu = kvm_x86_ops->vcpu_create(kvm, id);

	return vcpu;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	kvm_vcpu_mtrr_init(vcpu);
	r = vcpu_load(vcpu);
	if (r)
		return r;
	kvm_vcpu_reset(vcpu, false);
	kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	return r;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	struct msr_data msr;
	struct kvm *kvm = vcpu->kvm;

	if (vcpu_load(vcpu))
		return;
	msr.data = 0x0;
	msr.index = MSR_IA32_TSC;
	msr.host_initiated = true;
	kvm_write_tsc(vcpu, &msr);
	vcpu_put(vcpu);

	if (!kvmclock_periodic_sync)
		return;

	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
					KVMCLOCK_SYNC_PERIOD);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int r;
	vcpu->arch.apf.msr_val = 0;

	r = vcpu_load(vcpu);
	BUG_ON(r);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	vcpu->arch.hflags = 0;

	vcpu->arch.smi_pending = 0;
	atomic_set(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = 0;
	vcpu->arch.nmi_injected = false;
	kvm_clear_interrupt_queue(vcpu);
	kvm_clear_exception_queue(vcpu);

	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	kvm_update_dr0123(vcpu);
	vcpu->arch.dr6 = DR6_INIT;
	kvm_update_dr6(vcpu);
	vcpu->arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(vcpu);

	vcpu->arch.cr2 = 0;

	kvm_make_request(KVM_REQ_EVENT, vcpu);
	vcpu->arch.apf.msr_val = 0;
	vcpu->arch.st.msr_val = 0;

	kvmclock_reset(vcpu);

	kvm_clear_async_pf_completion_queue(vcpu);
	kvm_async_pf_hash_reset(vcpu);
	vcpu->arch.apf.halted = false;

	if (!init_event) {
		kvm_pmu_reset(vcpu);
		vcpu->arch.smbase = 0x30000;
	}

	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	kvm_x86_ops->vcpu_reset(vcpu, init_event);
}

void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs.selector = vector << 8;
	cs.base = vector << 12;
	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_rip_write(vcpu, 0);
}
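
/*
 * Worked example (illustrative comment only): a SIPI with vector 0x9a gives
 * cs.selector = 0x9a00 and cs.base = 0x9a000, so with RIP reset to 0 the AP
 * starts fetching real-mode code at physical address 0x9a000, i.e.
 * vector * 4 KiB, matching the SDM's definition of the startup IPI.
 */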

int kvm_arch_hardware_enable(void)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	int ret;
	u64 local_tsc;
	u64 max_tsc = 0;
	bool stable, backwards_tsc = false;

	kvm_shared_msr_cpu_online();
	ret = kvm_x86_ops->hardware_enable();
	if (ret != 0)
		return ret;

	local_tsc = rdtsc();
	stable = !check_tsc_unstable();
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!stable && vcpu->cpu == smp_processor_id())
				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
				backwards_tsc = true;
				if (vcpu->arch.last_host_tsc > max_tsc)
					max_tsc = vcpu->arch.last_host_tsc;
			}
		}
	}

	/*
	 * Sometimes, even reliable TSCs go backwards.  This happens on
	 * platforms that reset TSC during suspend or hibernate actions, but
	 * maintain synchronization.  We must compensate.  Fortunately, we can
	 * detect that condition here, which happens early in CPU bringup,
	 * before any KVM threads can be running.  Unfortunately, we can't
	 * bring the TSCs fully up to date with real time, as we aren't yet far
	 * enough into CPU bringup that we know how much real time has actually
	 * elapsed; our helper function, ktime_get_boot_ns(), will be using boot
	 * variables that haven't been updated yet.
	 *
	 * So we simply find the maximum observed TSC above, then record the
	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
	 * the adjustment will be applied.  Note that we accumulate
	 * adjustments, in case multiple suspend cycles happen before some VCPU
	 * gets a chance to run again.  In the event that no KVM threads get a
	 * chance to run, we will miss the entire elapsed period, as we'll have
	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
	 * lose cycle time.  This isn't too big a deal, since the loss will be
	 * uniform across all VCPUs (not to mention the scenario is extremely
	 * unlikely). It is possible that a second hibernate recovery happens
	 * much faster than a first, causing the observed TSC here to be
	 * smaller; this would require additional padding adjustment, which is
	 * why we set last_host_tsc to the local tsc observed here.
	 *
	 * N.B. - this code below runs only on platforms with reliable TSC,
	 * as that is the only way backwards_tsc is set above.  Also note
	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
	 * have the same delta_cyc adjustment applied if backwards_tsc
	 * is detected.  Note further, this adjustment is only done once,
	 * as we reset last_host_tsc on all VCPUs to stop this from being
	 * called multiple times (one for each physical CPU bringup).
	 *
	 * Platforms with unreliable TSCs don't have to deal with this, they
	 * will be compensated by the logic in vcpu_load, which sets the TSC to
	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
	 * guarantee that they stay in perfect synchronization.
	 */
	if (backwards_tsc) {
		u64 delta_cyc = max_tsc - local_tsc;
		backwards_tsc_observed = true;
		list_for_each_entry(kvm, &vm_list, vm_list) {
			kvm_for_each_vcpu(i, vcpu, kvm) {
				vcpu->arch.tsc_offset_adjustment += delta_cyc;
				vcpu->arch.last_host_tsc = local_tsc;
				kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
			}

			/*
			 * We have to disable TSC offset matching: if you were
			 * booting a VM while issuing an S4 host suspend, you
			 * may have some problem.  Solving this issue is left
			 * as an exercise to the reader.
			 */
			kvm->arch.last_tsc_nsec = 0;
			kvm->arch.last_tsc_write = 0;
		}

	}
	return 0;
}

void kvm_arch_hardware_disable(void)
{
	kvm_x86_ops->hardware_disable();
	drop_user_return_notifiers();
}

int kvm_arch_hardware_setup(void)
{
	int r;

	r = kvm_x86_ops->hardware_setup();
	if (r != 0)
		return r;

	if (kvm_has_tsc_control) {
		/*
		 * Make sure the user can only configure tsc_khz values that
		 * fit into a signed integer.
		 * A min value is not needed because it will always
		 * be 1 on all machines.
		 */
		u64 max = min(0x7fffffffULL,
			      __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
		kvm_max_guest_tsc_khz = max;

		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
	}

	kvm_init_msr_list();
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);

bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
}

struct static_key kvm_no_apic_vcpu __read_mostly;
EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv();
	vcpu->arch.pv.pv_unhalted = false;
	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	kvm_set_tsc_khz(vcpu, max_tsc_khz);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	} else
		static_key_slow_inc(&kvm_no_apic_vcpu);

	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
		goto fail_free_lapic;
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
		r = -ENOMEM;
		goto fail_free_mce_banks;
	}

	fx_init(vcpu);

	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
	vcpu->arch.pv_time_enabled = false;

	vcpu->arch.guest_supported_xcr0 = 0;
	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

	vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;

	kvm_async_pf_hash_reset(vcpu);
	kvm_pmu_init(vcpu);

	vcpu->arch.pending_external_vector = -1;

	kvm_hv_vcpu_init(vcpu);

	return 0;

fail_free_mce_banks:
	kfree(vcpu->arch.mce_banks);
fail_free_lapic:
	kvm_free_lapic(vcpu);
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int idx;

	kvm_hv_vcpu_uninit(vcpu);
	kvm_pmu_destroy(vcpu);
	kfree(vcpu->arch.mce_banks);
	kvm_free_lapic(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_mmu_destroy(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	free_page((unsigned long)vcpu->arch.pio_data);
	if (!lapic_in_kernel(vcpu))
		static_key_slow_dec(&kvm_no_apic_vcpu);
}

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->sched_in(vcpu, cpu);
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
	atomic_set(&kvm->arch.noncoherent_dma_count, 0);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
	/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
	set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		&kvm->arch.irq_sources_bitmap);

	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
	mutex_init(&kvm->arch.apic_map_lock);
	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);

	kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
	pvclock_update_vm_gtod_copy(kvm);

	INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
	INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);

	kvm_page_track_init(kvm);
	kvm_mmu_init_vm(kvm);

	if (kvm_x86_ops->vm_init)
		return kvm_x86_ops->vm_init(kvm);

	return 0;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	int r;
	r = vcpu_load(vcpu);
	BUG_ON(r);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/*
	 * Unpin any mmu pages first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_unload_vcpu_mmu(vcpu);
	}
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
	cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
	kvm_free_all_assigned_devices(kvm);
	kvm_free_pit(kvm);
}

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
	int i, r;
	unsigned long hva;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *slot, old;

	/* Called with kvm->slots_lock held.  */
	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
		return -EINVAL;

	slot = id_to_memslot(slots, id);
	if (size) {
		if (slot->npages)
			return -EEXIST;

		/*
		 * MAP_SHARED to prevent internal slot pages from being moved
		 * by fork()/COW.
		 */
		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
			      MAP_SHARED | MAP_ANONYMOUS, 0);
		if (IS_ERR((void *)hva))
			return PTR_ERR((void *)hva);
	} else {
		if (!slot->npages)
			return 0;

		hva = 0;
	}

	old = *slot;
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct kvm_userspace_memory_region m;

		m.slot = id | (i << 16);
		m.flags = 0;
		m.guest_phys_addr = gpa;
		m.userspace_addr = hva;
		m.memory_size = size;
		r = __kvm_set_memory_region(kvm, &m);
		if (r < 0)
			return r;
	}

	if (!size) {
		r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
		WARN_ON(r < 0);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__x86_set_memory_region);

int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
{
	int r;

	mutex_lock(&kvm->slots_lock);
	r = __x86_set_memory_region(kvm, id, gpa, size);
	mutex_unlock(&kvm->slots_lock);

	return r;
}
EXPORT_SYMBOL_GPL(x86_set_memory_region);
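
/*
 * Illustrative sketch only (hedged): in-kernel callers use the helpers above
 * to carve out private slots, e.g. VMX reserving one page for the EPT
 * identity map.  The address below is the conventional default, not a value
 * defined in this file.
 */
#if 0
static int example_reserve_identity_map(struct kvm *kvm)
{
	return x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
				     0xfffbc000, PAGE_SIZE);
}
#endif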

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	if (current->mm == kvm->mm) {
		/*
		 * Free memory regions allocated on behalf of userspace,
		 * unless the memory map has changed due to process exit
		 * or fd copying.
		 */
		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
	}
	if (kvm_x86_ops->vm_destroy)
		kvm_x86_ops->vm_destroy(kvm);
	kvm_iommu_unmap_guest(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
	kvm_mmu_uninit_vm(kvm);
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
			kvfree(free->arch.rmap[i]);
			free->arch.rmap[i] = NULL;
		}
		if (i == 0)
			continue;

		if (!dont || free->arch.lpage_info[i - 1] !=
			     dont->arch.lpage_info[i - 1]) {
			kvfree(free->arch.lpage_info[i - 1]);
			free->arch.lpage_info[i - 1] = NULL;
		}
	}

	kvm_page_track_free_memslot(free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		struct kvm_lpage_info *linfo;
		unsigned long ugfn;
		int lpages;
		int level = i + 1;

		lpages = gfn_to_index(slot->base_gfn + npages - 1,
				      slot->base_gfn, level) + 1;

		slot->arch.rmap[i] =
			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
		if (!slot->arch.rmap[i])
			goto out_free;
		if (i == 0)
			continue;

		linfo = kvm_kvzalloc(lpages * sizeof(*linfo));
		if (!linfo)
			goto out_free;

		slot->arch.lpage_info[i - 1] = linfo;

		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
			linfo[0].disallow_lpage = 1;
		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
			linfo[lpages - 1].disallow_lpage = 1;
		ugfn = slot->userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !kvm_largepages_enabled()) {
			unsigned long j;

			for (j = 0; j < lpages; ++j)
				linfo[j].disallow_lpage = 1;
		}
	}

	if (kvm_page_track_create_memslot(slot, npages))
		goto out_free;

	return 0;

out_free:
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		kvfree(slot->arch.rmap[i]);
		slot->arch.rmap[i] = NULL;
		if (i == 0)
			continue;

		kvfree(slot->arch.lpage_info[i - 1]);
		slot->arch.lpage_info[i - 1] = NULL;
	}
	return -ENOMEM;
}
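
/*
 * Worked example (illustrative comment only): for a slot with
 * base_gfn = 0x100 and npages = 0x400, the 2 MiB level (level 2, 512 small
 * pages per huge page) gives
 *   lpages = gfn_to_index(0x4ff, 0x100, 2) + 1 = 2 - 0 + 1 = 3,
 * and because base_gfn 0x100 is not 512-page aligned, linfo[0] gets
 * disallow_lpage = 1 so no huge page straddles the slot boundary.
 */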

void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
{
	/*
	 * memslots->generation has been incremented.
	 * mmio generation may have reached its maximum value.
	 */
	kvm_mmu_invalidate_mmio_sptes(kvm, slots);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change)
{
	return 0;
}

static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
				     struct kvm_memory_slot *new)
{
	/* Still write protect RO slot */
	if (new->flags & KVM_MEM_READONLY) {
		kvm_mmu_slot_remove_write_access(kvm, new);
		return;
	}

	/*
	 * Call kvm_x86_ops dirty logging hooks when they are valid.
	 *
	 * kvm_x86_ops->slot_disable_log_dirty is called when:
	 *
	 *  - KVM_MR_CREATE with dirty logging is disabled
	 *  - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
	 *
	 * The reason is, in case of PML, we need to set D-bit for any slots
	 * with dirty logging disabled in order to eliminate unnecessary GPA
	 * logging in PML buffer (and potential PML buffer full VMEXIT). This
	 * guarantees leaving PML enabled during guest's lifetime won't have
	 * any additional overhead from PML when guest is running with dirty
	 * logging disabled for memory slots.
	 *
	 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
	 * to dirty logging mode.
	 *
	 * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
	 *
	 * In case of write protect:
	 *
	 * Write protect all pages for dirty logging.
	 *
	 * All the sptes including the large sptes which point to this
	 * slot are set to readonly. We can not create any new large
	 * spte on this slot until the end of the logging.
	 *
	 * See the comments in fast_page_fault().
	 */
	if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
		if (kvm_x86_ops->slot_enable_log_dirty)
			kvm_x86_ops->slot_enable_log_dirty(kvm, new);
		else
			kvm_mmu_slot_remove_write_access(kvm, new);
	} else {
		if (kvm_x86_ops->slot_disable_log_dirty)
			kvm_x86_ops->slot_disable_log_dirty(kvm, new);
	}
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int nr_mmu_pages = 0;

	if (!kvm->arch.n_requested_mmu_pages)
		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);

	if (nr_mmu_pages)
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);

	/*
	 * Dirty logging tracks sptes in 4k granularity, meaning that large
	 * sptes have to be split.  If live migration is successful, the guest
	 * in the source machine will be destroyed and large sptes will be
	 * created in the destination. However, if the guest continues to run
	 * in the source machine (for example if live migration fails), small
	 * sptes will remain around and cause bad performance.
	 *
	 * Scan sptes if dirty logging has been stopped, dropping those
	 * which can be collapsed into a single large-page spte.  Later
	 * page faults will create the large-page sptes.
	 */
	if ((change != KVM_MR_DELETE) &&
		(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
		!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
		kvm_mmu_zap_collapsible_sptes(kvm, new);

	/*
	 * Set up write protection and/or dirty logging for the new slot.
	 *
	 * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old slot
	 * have been zapped so no dirty logging tracking is needed for the old
	 * slot. For KVM_MR_FLAGS_ONLY, the old slot is essentially the same
	 * one as the new and it's also covered when dealing with the new slot.
	 *
	 * FIXME: const-ify all uses of struct kvm_memory_slot.
	 */
	if (change != KVM_MR_DELETE)
		kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_mmu_invalidate_zap_all_pages(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvm_page_track_flush_slot(kvm, slot);
}

static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
	if (!list_empty_careful(&vcpu->async_pf.done))
		return true;

	if (kvm_apic_has_events(vcpu))
		return true;

	if (vcpu->arch.pv.pv_unhalted)
		return true;

	if (atomic_read(&vcpu->arch.nmi_queued))
		return true;

	if (test_bit(KVM_REQ_SMI, &vcpu->requests))
		return true;

	if (kvm_arch_interrupt_allowed(vcpu) &&
	    kvm_cpu_has_interrupt(vcpu))
		return true;

	if (kvm_hv_has_stimer_pending(vcpu))
		return true;

	return false;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
		kvm_x86_ops->check_nested_events(vcpu, false);

	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
{
	if (is_64_bit_mode(vcpu))
		return kvm_rip_read(vcpu);
	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
		     kvm_rip_read(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_linear_rip);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	return kvm_get_linear_rip(vcpu) == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
}

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__kvm_set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	int r;

	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
	      work->wakeup_all)
		return;

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		return;

	if (!vcpu->arch.mmu.direct_map &&
	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
		return;

	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
}

static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}

static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
}

static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}

static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
		key = kvm_async_pf_next_probe(key);

	return key;
}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}

static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;
			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or  |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}
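
/*
 * Illustrative sketch only, not from this file: kvm_del_async_pf_gfn() above
 * is the classic backward-shift deletion for open addressing with linear
 * probing (cf. Knuth, TAOCP vol. 3).  A free-standing rendition over a toy
 * table follows; TBL_SIZE, table[] and the hash callback are invented for
 * the example.
 */
#if 0
#define TBL_SIZE 8		/* power of two, like the async-PF table */
#define EMPTY (~0u)

static unsigned int table[TBL_SIZE];

static unsigned int next_probe(unsigned int key)
{
	return (key + 1) & (TBL_SIZE - 1);
}

static void probe_delete(unsigned int i, unsigned int (*hash)(unsigned int))
{
	unsigned int j = i, k;

	while (1) {
		table[i] = EMPTY;
		do {
			j = next_probe(j);
			if (table[j] == EMPTY)
				return;		/* probe chain ends: done */
			k = hash(table[j]);
			/* skip entries whose home slot k lies cyclically in ]i,j] */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		table[i] = table[j];	/* pull the displaced entry back */
		i = j;			/* and delete it from its old slot */
	}
}
#endif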

static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
				      sizeof(val));
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
	    (vcpu->arch.apf.send_user_only &&
	     kvm_x86_ops->get_cpl(vcpu) == 0))
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		kvm_inject_page_fault(vcpu, &fault);
	}
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_ready(work->arch.token, work->gva);
	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);

	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		kvm_inject_page_fault(vcpu, &fault);
	}
	vcpu->arch.apf.halted = false;
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return true;
	else
		return !kvm_event_needs_reinjection(vcpu) &&
			kvm_x86_ops->interrupt_allowed(vcpu);
}

void kvm_arch_start_assignment(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);

void kvm_arch_end_assignment(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);

bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.assigned_device_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);

void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

bool kvm_arch_has_irq_bypass(void)
{
	return kvm_x86_ops->update_pi_irte != NULL;
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	irqfd->producer = prod;

	return kvm_x86_ops->update_pi_irte(irqfd->kvm,
					   prod->irq, irqfd->gsi, 1);
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	int ret;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	WARN_ON(irqfd->producer != prod);
	irqfd->producer = NULL;

	/*
	 * When the producer of a consumer is unregistered, we change back to
	 * remapped mode, so we can re-use the current implementation
	 * when the irq is masked/disabled or the consumer side (KVM
	 * in this case) doesn't want to receive the interrupts.
	 */
	ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
	if (ret)
		printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
		       " fails: %d\n", irqfd->consumer.token, ret);
}

int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				   uint32_t guest_irq, bool set)
{
	if (!kvm_x86_ops->update_pi_irte)
		return -EINVAL;

	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
}

bool kvm_vector_hashing_enabled(void)
{
	return vector_hashing;
}
EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);