/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				     int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
		return;

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector, old_vector;
	bool masked;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	masked = data & HV_SYNIC_SINT_MASKED;

	/*
	 * Valid vectors are 16-255; however, nested Hyper-V attempts to write
	 * the default '0x10000' value on boot and this should not #GP. We need
	 * to allow zero-initializing the register from the host as well.
	 */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior.  The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */
	old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

	atomic64_set(&synic->sint[sint], data);

	synic_update_vector(synic, old_vector);

	synic_update_vector(synic, vector);
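
	/*
	 * Example: if SINT0 and SINT1 both target vector 0x40 and SINT0 is
	 * then repointed elsewhere, the old_vector pass above rescans all
	 * SINTs and keeps bit 0x40 set because SINT1 still uses it.
	 */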

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
	return 0;
}

static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
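	/*
	 * The VP index usually matches the vcpu index (it is initialized that
	 * way in kvm_hv_vcpu_postcreate()), so try that first and fall back
	 * to a full scan.
	 */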
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (vpidx < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, vpidx);
	if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
			return vcpu;
	return NULL;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
	if (!vcpu)
		return NULL;
	synic = vcpu_to_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
					u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *msg;
	struct hv_message_page *msg_page;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page)) {
		vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
			 gpa);
		return;
	}
	msg_page = kmap_atomic(page);

	msg = &msg_page->sint_message[sint];
	msg->header.message_flags.msg_pending = 0;

	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx, stimers_pending;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
		synic_clear_sint_msg_pending(synic, sint);

	/* Try to deliver pending Hyper-V SynIC timer messages */
	stimers_pending = 0;
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending &&
		    (stimer->config & HV_STIMER_ENABLE) &&
		    HV_STIMER_SINT(stimer->config) == sint) {
			set_bit(stimer->index,
				hv_vcpu->stimer_pending_bitmap);
			stimers_pending++;
		}
	}
	if (stimers_pending)
		kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	int ret;

	if (!synic->active && !host)
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{
	int ret;

	if (!synic->active && !host)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}

static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * If the guest has not set up the TSC page or the clock isn't
	 * stable, fall back to get_kvmclock_ns.
	 */
	if (!hv->tsc_ref.tsc_sequence)
		return div_u64(get_kvmclock_ns(kvm), 100);
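
	/* Otherwise evaluate the TSC page formula: nsec/100 = tsc * scale / 2^64 + offset. */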

	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
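/*
 * Illustrative re-arm arithmetic for the periodic case (made-up numbers):
 * with count = 1000 (i.e. 100us in 100ns units) and exp_time 2500 ticks in
 * the past, div64_u64_rem() leaves remainder 500, so the next expiration
 * lands count - remainder = 500 ticks from now, keeping the timer aligned
 * with its original phase.
 */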
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config & HV_STIMER_PERIODIC) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;
		trace_kvm_hv_stimer_start_periodic(
					stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * Specification v4 (15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
		config &= ~HV_STIMER_ENABLE;
	stimer->config = config;
	stimer_mark_pending(stimer, false);
	return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config &= ~HV_STIMER_ENABLE;
	else if (stimer->config & HV_STIMER_AUTOENABLE)
		stimer->config |= HV_STIMER_ENABLE;
	stimer_mark_pending(stimer, false);
	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}
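
/*
 * synic_deliver_msg() below follows the TLFS message-page slot protocol:
 * a slot is free iff its message_type == HVMSG_NONE; when the slot is busy,
 * msg_pending is set so that the guest's EOM write triggers another delivery
 * attempt via kvm_hv_notify_acked_sint().
 */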

static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *dst_msg;
	int r;
	struct hv_message_page *msg_page;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		return -EFAULT;

	msg_page = kmap_atomic(page);
	dst_msg = &msg_page->sint_message[sint];
	if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
			 src_msg->header.message_type) != HVMSG_NONE) {
		dst_msg->header.message_flags.msg_pending = 1;
		r = -EAGAIN;
	} else {
		memcpy(&dst_msg->u.payload, &src_msg->u.payload,
		       src_msg->header.payload_size);
		dst_msg->header.message_type = src_msg->header.message_type;
		dst_msg->header.payload_size = src_msg->header.payload_size;
		r = synic_set_irq(synic, sint);
		if (r >= 1)
			r = 0;
		else if (r == 0)
			r = -EFAULT;
	}
	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	return r;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(vcpu_to_synic(vcpu),
				 HV_STIMER_SINT(stimer->config), msg);
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r;

	stimer->msg_pending = true;
	r = stimer_send_msg(stimer);
	trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config & HV_STIMER_PERIODIC))
			stimer->config &= ~HV_STIMER_ENABLE;
	}
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config & HV_STIMER_ENABLE) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config & HV_STIMER_ENABLE) &&
				    stimer->count) {
					if (!stimer->msg_pending)
						stimer_start(stimer);
				} else
					stimer_cleanup(stimer);
			}
		}
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);
}

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);
}

void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);

	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

	/*
	 * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv,
	 * so deactivate APICv.
	 */
	kvm_vcpu_deactivate_apicv(vcpu);
	synic->active = true;
	synic->dont_zero_synic_pages = dont_zero_synic_pages;
	return 0;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	*pdata = hv->hv_crash_param[index];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			  hv->hv_crash_param[0],
			  hv->hv_crash_param[1],
			  hv->hv_crash_param[2],
			  hv->hv_crash_param[3],
			  hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	hv->hv_crash_param[index] = data;
	return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
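/*
 * Worked example with illustrative (not real-hardware) kvmclock values:
 * for a 4 GHz TSC one possible parameter pair is tsc_shift = -1 and
 * tsc_to_system_mul = 2^31, i.e. 0.25 ns per tick.  Then
 *    scale = 2^31 * 2^(32 - 1) / 100 = 2^62 / 100
 * and ticks * scale / 2^64 = ticks / 400: one 100ns unit per 400 TSC
 * ticks, which is exactly a 4 GHz clock.
 */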
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					HV_REFERENCE_TSC_PAGE *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * check if scale would overflow, if so we use the time ref counter
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		return;

	mutex_lock(&kvm->arch.hyperv.hv_lock);
	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_unlock;

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_unlock;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_unlock;

	/* Ensure sequence is zero before writing the rest of the struct.  */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_unlock;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence.  */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	kvm_write_guest(kvm, gfn_to_gpa(gfn),
			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
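
	/*
	 * From here on, a guest following the usual seqcount-style protocol
	 * (read tsc_sequence, read scale and offset, re-read tsc_sequence,
	 * retry on mismatch, and fall back to the reference count MSR while
	 * the sequence is invalid) sees a consistent set of parameters.
	 */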
out_unlock:
	mutex_unlock(&kvm->arch.hyperv.hv_lock);
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		hv->hv_reenlightenment_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		hv->hv_tsc_emulation_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		hv->hv_tsc_emulation_status = data;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

	return div_u64(utime + stime, 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX:
		if (!host)
			return 1;
		hv->vp_index = (u32)data;
		break;
	case HV_X64_MSR_VP_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
			hv->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		hv->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
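		/* CONFIG and COUNT MSRs for a timer interleave, hence the /2. */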
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
					data, host);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		data = hv->hv_reenlightenment_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		data = hv->hv_tsc_emulation_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		data = hv->hv_tsc_emulation_status;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			  bool host)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX:
		data = hv->vp_index;
		break;
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_VP_ASSIST_PAGE:
		data = hv->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
					pdata);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
		data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
		break;
	case HV_X64_MSR_APIC_FREQUENCY:
		data = APIC_BUS_FREQUENCY;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata, host);
}

static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
{
	int i = 0, j;

	if (!(valid_bank_mask & BIT_ULL(bank_no)))
		return -1;

	for (j = 0; j < bank_no; j++)
		if (valid_bank_mask & BIT_ULL(j))
			i++;

	return i;
}
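
/*
 * Example (hypothetical mask): with valid_bank_mask = 0b1010, i.e. banks 1
 * and 3 present, bank 3 maps to sparse index 1 (the number of valid banks
 * below it), so its bits live in sparse_banks[1]; bank 0 is not in the set
 * and yields -1.
 */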

static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
			    u16 rep_cnt, bool ex)
{
	struct kvm *kvm = current_vcpu->kvm;
	struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv;
	struct hv_tlb_flush_ex flush_ex;
	struct hv_tlb_flush flush;
	struct kvm_vcpu *vcpu;
	unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0};
	unsigned long valid_bank_mask = 0;
	u64 sparse_banks[64];
	int sparse_banks_len, i;
	bool all_cpus;

	if (!ex) {
		if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		trace_kvm_hv_flush_tlb(flush.processor_mask,
				       flush.address_space, flush.flags);

		sparse_banks[0] = flush.processor_mask;
		all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
	} else {
		if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
					    sizeof(flush_ex))))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
					  flush_ex.hv_vp_set.format,
					  flush_ex.address_space,
					  flush_ex.flags);

		valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
		all_cpus = flush_ex.hv_vp_set.format !=
			HV_GENERIC_SET_SPARSE_4K;

		sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
			sizeof(sparse_banks[0]);

		if (!sparse_banks_len && !all_cpus)
			goto ret_success;

		if (!all_cpus &&
		    kvm_read_guest(kvm,
				   ingpa + offsetof(struct hv_tlb_flush_ex,
						    hv_vp_set.bank_contents),
				   sparse_banks,
				   sparse_banks_len))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

	cpumask_clear(&hv_current->tlb_lush);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
		int bank = hv->vp_index / 64, sbank = 0;

		if (!all_cpus) {
			/* Banks >64 can't be represented */
			if (bank >= 64)
				continue;

			/* Non-ex hypercalls can only address first 64 vCPUs */
			if (!ex && bank)
				continue;

			if (ex) {
				/*
				 * Check if the bank of this vCPU is in the
				 * sparse set and get the sparse bank number.
				 */
				sbank = get_sparse_bank_no(valid_bank_mask,
							   bank);

				if (sbank < 0)
					continue;
			}
			if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64)))
				continue;
		}

		/*
		 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we
		 * can't analyze it here, flush TLB regardless of the specified
		 * address space.
		 */
		__set_bit(i, vcpu_bitmap);
	}

	kvm_make_vcpus_request_mask(kvm,
				    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
				    vcpu_bitmap, &hv_current->tlb_lush);

ret_success:
	/* We always do full TLB flush, set rep_done = rep_cnt. */
	return (u64)HV_STATUS_SUCCESS |
		((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_mode(vcpu);
	if (longmode)
		kvm_register_write(vcpu, VCPU_REGS_RAX, result);
	else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
	}
}

static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_hv_hypercall_set_result(vcpu, result);
	++vcpu->stat.hypercalls;
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}

static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
{
	struct eventfd_ctx *eventfd;

	if (unlikely(!fast)) {
		int ret;
		gpa_t gpa = param;

		if ((gpa & (__alignof__(param) - 1)) ||
		    offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
			return HV_STATUS_INVALID_ALIGNMENT;

		ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
		if (ret < 0)
			return HV_STATUS_INVALID_ALIGNMENT;
	}

	/*
	 * Per spec, bits 32-47 contain the extra "flag number".  However, we
	 * have no use for it, and in all known usecases it is zero, so just
	 * report lookup failure if it isn't.
	 */
	if (param & 0xffff00000000ULL)
		return HV_STATUS_INVALID_PORT_ID;
	/* remaining bits are reserved-zero */
	if (param & ~KVM_HYPERV_CONN_ID_MASK)
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
	rcu_read_lock();
	eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
	rcu_read_unlock();
	if (!eventfd)
		return HV_STATUS_INVALID_PORT_ID;

	eventfd_signal(eventfd, 1);
	return HV_STATUS_SUCCESS;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
	uint16_t code, rep_idx, rep_cnt;
	bool fast, longmode, rep;

	/*
	 * Per the Hyper-V spec, a hypercall generates #UD when invoked from
	 * non-zero CPL or from real mode.
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif
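
	/*
	 * Hypercall input value layout, per the Hyper-V TLFS: bits 0-15 hold
	 * the call code, bit 16 the fast-call flag, bits 32-43 the rep count
	 * and bits 48-59 the rep start index; the shifts below rely on that
	 * layout.
	 */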

	code = param & 0xffff;
	fast = !!(param & HV_HYPERCALL_FAST_BIT);
	rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
	rep = !!(rep_cnt || rep_idx);

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
		/* maybe userspace knows this conn_id: fall through */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = param;
		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
		if (unlikely(fast || !rep_cnt || rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
		if (unlikely(fast || !rep_cnt || rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
		break;
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

	return kvm_hv_hypercall_complete(vcpu, ret);
}

void kvm_hv_init_vm(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hyperv.hv_lock);
	idr_init(&kvm->arch.hyperv.conn_to_evt);
}

void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct eventfd_ctx *eventfd;
	int i;

	idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&kvm->arch.hyperv.conn_to_evt);
}

static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct eventfd_ctx *eventfd;
	int ret;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);
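
	/*
	 * idr_alloc() over the single-id range [conn_id, conn_id + 1) either
	 * claims exactly that connection id or fails with -ENOSPC when it is
	 * already taken, which is reported back as -EEXIST.
	 */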

	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL);
	mutex_unlock(&hv->hv_lock);

	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}

static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct eventfd_ctx *eventfd;

	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);

	if (!eventfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}

int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;

	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}