/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

#define APIC_BUS_CYCLE_NS 1

/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
#define apic_debug(fmt, arg...)

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* The following defines are not in apicdef.h */
#define APIC_SHORT_MASK			0xc0000
#define APIC_DEST_NOSHORT		0x0
#define APIC_DEST_MASK			0x800
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

#define APIC_BROADCAST			0xFF
#define X2APIC_BROADCAST		0xFFFFFFFFul

static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline void apic_clear_vector(int vec, void *bitmap)
{
	clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

/* The logical map is definitely wrong if we have multiple
 * modes at the same time.  (Physical map is always right.)
 */
static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
{
	return !(map->mode & (map->mode - 1));
}

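/*
 * Split a logical destination ID into (cluster ID, logical ID within the
 * cluster).  The map mode value doubles as the number of logical-ID bits:
 * 4 for xAPIC cluster mode, 8 for xAPIC flat mode, 16 for x2APIC.
 */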
static inline void
apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
{
	unsigned lid_bits;

	BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER !=  4);
	BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT    !=  8);
	BUILD_BUG_ON(KVM_APIC_MODE_X2APIC        != 16);
	lid_bits = map->mode;

	*cid = dest_id >> lid_bits;
	*lid = dest_id & ((1 << lid_bits) - 1);
}

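/*
 * Rebuild the physical and logical destination maps from the current APIC
 * IDs and LDRs of all vCPUs.  Readers access the map under RCU; the old map
 * is freed with kfree_rcu() after the new one is published.
 */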
static void recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;

	new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);

	mutex_lock(&kvm->arch.apic_map_lock);

	if (!new)
		goto out;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		u16 cid, lid;
		u32 ldr, aid;

		if (!kvm_apic_present(vcpu))
			continue;

		aid = kvm_apic_id(apic);
		ldr = kvm_lapic_get_reg(apic, APIC_LDR);

		if (aid < ARRAY_SIZE(new->phys_map))
			new->phys_map[aid] = apic;

		if (apic_x2apic_mode(apic)) {
			new->mode |= KVM_APIC_MODE_X2APIC;
		} else if (ldr) {
			ldr = GET_APIC_LOGICAL_ID(ldr);
			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
			else
				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
		}

		if (!kvm_apic_logical_map_valid(new))
			continue;

		apic_logical_id(new, ldr, &cid, &lid);

		if (lid && cid < ARRAY_SIZE(new->logical_map))
			new->logical_map[cid][ffs(lid) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		kfree_rcu(old, rcu);

	kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled) {
			static_key_slow_dec_deferred(&apic_sw_disabled);
			recalculate_apic_map(apic->vcpu->kvm);
		} else
			static_key_slow_inc(&apic_sw_disabled.key);
	}
}

static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u8 id)
{
	u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));

	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	recalculate_apic_map(apic->vcpu->kvm);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
	return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *feat;
	u32 v = APIC_VERSION;

	if (!lapic_in_kernel(vcpu))
		return;

	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
	LVT_MASK,	/* part LVTT mask, timer mode mask added at runtime */
	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
	LINT_MASK, LINT_MASK,	/* LVT0-1 */
	LVT_MASK		/* LVTERR */
};

static int find_highest_vector(void *bitmap)
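/*
 * Scan a 256-bit vector bitmap (IRR/ISR/TMR layout: 32 vectors per 16-byte
 * register) from the top down and return the highest vector set, or -1.
 */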
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return fls(*reg) - 1 + vec;
	}

	return -1;
}

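/* Count the vectors set across all eight 32-bit words of a vector bitmap. */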
static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}

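/*
 * Merge the posted-interrupt request bitmap (8 x 32-bit words) into the
 * IRR of the given vAPIC page, clearing each PIR word as it is consumed.
 */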
void __kvm_apic_update_irr(u32 *pir, void *regs)
{
	u32 i, pir_val;

	for (i = 0; i <= 7; i++) {
		pir_val = xchg(&pir[i], 0);
		if (pir_val)
323
			*((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
324 325
	}
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	__kvm_apic_update_irr(pir, apic->regs);

	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will always be
	 * true with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	if (apic->vcpu->arch.apicv_active)
		kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	vcpu = apic->vcpu;

	if (unlikely(vcpu->arch.apicv_active)) {
		/* try to update RVI */
		apic_clear_vector(vec, apic->regs + APIC_IRR);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	} else {
		apic->irr_pending = false;
		apic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood.  Instead
	 * just set SVI.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		kvm_x86_ops->hwapic_isr_update(vcpu, vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * ISR (in service register) bit is set when injecting an interrupt.
		 * The highest vector is injected. Thus the latest bit set matches
		 * the highest bit in ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment.  In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		kvm_x86_ops->hwapic_isr_update(vcpu,
					       apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/* This may race with setting of irr in __apic_accept_irq() and
	 * the value returned may be stale, but kvm_vcpu_kick() in __apic_accept_irq
	 * will cause vmexit immediately and the value will be recalculated
	 * on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{

	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				      sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
	u8 val;
	if (pv_eoi_get_user(vcpu, &val) < 0)
		apic_debug("Can't read EOI MSR value: 0x%llx\n",
499
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
500 501 502 503 504 505 506
	return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
		apic_debug("Can't set EOI MSR value: 0x%llx\n",
507
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
508 509 510 511 512 513 514 515 516
		return;
	}
	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
		apic_debug("Can't clear EOI MSR value: 0x%llx\n",
517
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
518 519 520 521 522
		return;
	}
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

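/*
 * Recompute the processor priority: PPR is the higher of TPR and the
 * priority class of the highest in-service vector.  A drop in PPR may
 * unmask a pending interrupt, so request another event scan in that case.
 */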
static void apic_update_ppr(struct kvm_lapic *apic)
{
525
	u32 tpr, isrv, ppr, old_ppr;
E
Eddie Dong 已提交
526 527
	int isr;

528 529
	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
E
Eddie Dong 已提交
530 531 532 533 534 535 536 537 538 539 540
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
		   apic, ppr, isr, isrv);

541
	if (old_ppr != ppr) {
542
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
543 544
		if (ppr < old_ppr)
			kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
545
	}
E
Eddie Dong 已提交
546 547 548 549
}

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
550
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
E
Eddie Dong 已提交
551 552 553
	apic_update_ppr(apic);
}

554
static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
555
{
556 557 558 559
	if (apic_x2apic_mode(apic))
		return mda == X2APIC_BROADCAST;

	return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
560 561
}

562
static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
E
Eddie Dong 已提交
563
{
564 565 566 567 568 569 570
	if (kvm_apic_broadcast(apic, mda))
		return true;

	if (apic_x2apic_mode(apic))
		return mda == kvm_apic_id(apic);

	return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
E
Eddie Dong 已提交
571 572
}

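/*
 * Logical-mode matching: in x2APIC mode the LDR is <cluster:16><bitmask:16>;
 * in xAPIC mode the DFR selects flat (8-bit bitmask) or cluster
 * (4-bit cluster ID + 4-bit bitmask) format.
 */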
static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
G
Gleb Natapov 已提交
575 576
	u32 logical_id;

577
	if (kvm_apic_broadcast(apic, mda))
578
		return true;
579

580
	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
E
Eddie Dong 已提交
581

582
	if (apic_x2apic_mode(apic))
583 584
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;
E
Eddie Dong 已提交
585

586
	logical_id = GET_APIC_LOGICAL_ID(logical_id);
587
	mda = GET_APIC_DEST_FIELD(mda);
E
Eddie Dong 已提交
588

589
	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
E
Eddie Dong 已提交
590
	case APIC_DFR_FLAT:
591
		return (logical_id & mda) != 0;
E
Eddie Dong 已提交
592
	case APIC_DFR_CLUSTER:
593 594
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
E
Eddie Dong 已提交
595
	default:
596
		apic_debug("Bad DFR vcpu %d: %08x\n",
597
			   apic->vcpu->vcpu_id, kvm_lapic_get_reg(apic, APIC_DFR));
598
		return false;
E
Eddie Dong 已提交
599 600 601
	}
}

/* The KVM APIC implementation has two quirks:
 *  - dest always begins at 0 while xAPIC MDA has offset 24,
 *  - IOxAPIC messages have to be delivered (directly) to x2APIC.
 */
static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source,
                                              struct kvm_lapic *target)
{
	bool ipi = source != NULL;
	bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);

	if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
		return X2APIC_BROADCAST;

	return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
}

618
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
619
			   int short_hand, unsigned int dest, int dest_mode)
E
Eddie Dong 已提交
620
{
621
	struct kvm_lapic *target = vcpu->arch.apic;
622
	u32 mda = kvm_apic_mda(dest, source, target);
E
Eddie Dong 已提交
623 624

	apic_debug("target %p, source %p, dest 0x%x, "
625
		   "dest_mode 0x%x, short_hand 0x%x\n",
E
Eddie Dong 已提交
626 627
		   target, source, dest, dest_mode, short_hand);

Z
Zachary Amsden 已提交
628
	ASSERT(target);
E
Eddie Dong 已提交
629 630
	switch (short_hand) {
	case APIC_DEST_NOSHORT:
631
		if (dest_mode == APIC_DEST_PHYSICAL)
632
			return kvm_apic_match_physical_addr(target, mda);
633
		else
634
			return kvm_apic_match_logical_addr(target, mda);
E
Eddie Dong 已提交
635
	case APIC_DEST_SELF:
636
		return target == source;
E
Eddie Dong 已提交
637
	case APIC_DEST_ALLINC:
638
		return true;
E
Eddie Dong 已提交
639
	case APIC_DEST_ALLBUT:
640
		return target != source;
E
Eddie Dong 已提交
641
	default:
642 643
		apic_debug("kvm: apic: Bad dest shorthand value %x\n",
			   short_hand);
644
		return false;
E
Eddie Dong 已提交
645 646
	}
}
647
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
E
Eddie Dong 已提交
648

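/*
 * Vector hashing: pick the (vector % dest_vcpus)-th set bit (counting from
 * zero) of the destination bitmap as the target index.
 */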
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
		       const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}

665 666 667 668 669 670 671 672 673
static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		printk(KERN_INFO
		       "Disabled LAPIC found during irq injection\n");
	}
}

674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695
/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
{
	int i, lowest;
	bool x2apic_ipi;
	u16 cid;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;
696

697 698 699
	x2apic_ipi = src && *src && apic_x2apic_mode(*src);
	if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
		return false;
700

701 702 703 704 705 706 707 708 709 710
	if (!map)
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id >= ARRAY_SIZE(map->phys_map)) {
			*bitmap = 0;
		} else {
			*dst = &map->phys_map[irq->dest_id];
			*bitmap = 1;
		}
711 712 713
		return true;
	}

714
	if (!kvm_apic_logical_map_valid(map))
715 716
		return false;

717
	apic_logical_id(map, irq->dest_id, &cid, (u16 *)bitmap);
718

719 720 721
	if (cid >= ARRAY_SIZE(map->logical_map)) {
		*bitmap = 0;
		return true;
722
	}
723

724
	*dst = map->logical_map[cid];
725

726 727
	if (!kvm_lowest_prio_delivery(irq))
		return true;
728

729 730 731 732 733 734 735 736 737 738
	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						(*dst)[lowest]->vcpu) < 0)
				lowest = i;
739
		}
740 741 742
	} else {
		if (!*bitmap)
			return true;
743

744 745
		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
				bitmap, 16);
746

747 748 749 750 751 752
		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}
753

754
	*bitmap = (lowest >= 0) ? 1 << lowest : 0;
755

756 757
	return true;
}
758

759 760 761 762 763 764 765 766
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;
767

768
	*r = -1;
769

770 771 772 773
	if (irq->shorthand == APIC_DEST_SELF) {
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}
774

775 776
	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);
777

778 779 780 781 782 783 784 785
	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret)
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			if (*r < 0)
				*r = 0;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
786 787 788 789 790 791
		}

	rcu_read_unlock();
	return ret;
}

/*
 * This routine tries to handle interrupts in posted mode; here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
806 807 808 809
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
810 811
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
812 813 814 815 816 817 818 819
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

820 821 822
	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
			hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);
823

824 825 826
		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
827
		}
828 829 830 831 832 833
	}

	rcu_read_unlock();
	return ret;
}

E
Eddie Dong 已提交
834 835 836 837 838
/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
839
			     int vector, int level, int trig_mode,
840
			     struct dest_map *dest_map)
E
Eddie Dong 已提交
841
{
842
	int result = 0;
843
	struct kvm_vcpu *vcpu = apic->vcpu;
E
Eddie Dong 已提交
844

845 846
	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
E
Eddie Dong 已提交
847 848
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
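		/* fall through */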
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

E
Eddie Dong 已提交
854 855 856 857
		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

858 859
		result = 1;

860
		if (dest_map) {
861
			__set_bit(vcpu->vcpu_id, dest_map->map);
862 863
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}
864

865 866
		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
867
				kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
868 869 870 871
			else
				apic_clear_vector(vector, apic->regs + APIC_TMR);
		}

872
		if (vcpu->arch.apicv_active)
873
			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
874
		else {
875
			kvm_lapic_set_irr(vector, apic);
876 877 878 879

			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
E
Eddie Dong 已提交
880 881 882
		break;

	case APIC_DM_REMRD:
883 884 885 886
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
E
Eddie Dong 已提交
887 888 889
		break;

	case APIC_DM_SMI:
P
Paolo Bonzini 已提交
890 891 892
		result = 1;
		kvm_make_request(KVM_REQ_SMI, vcpu);
		kvm_vcpu_kick(vcpu);
E
Eddie Dong 已提交
893
		break;
894

E
Eddie Dong 已提交
895
	case APIC_DM_NMI:
896
		result = 1;
897
		kvm_inject_nmi(vcpu);
J
Jan Kiszka 已提交
898
		kvm_vcpu_kick(vcpu);
E
Eddie Dong 已提交
899 900 901
		break;

	case APIC_DM_INIT:
902
		if (!trig_mode || level) {
903
			result = 1;
904 905 906 907 908
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			/* make sure pending_events is visible before sending
			 * the request */
			smp_wmb();
909
			kvm_make_request(KVM_REQ_EVENT, vcpu);
910 911
			kvm_vcpu_kick(vcpu);
		} else {
912 913
			apic_debug("Ignoring de-assert INIT to vcpu %d\n",
				   vcpu->vcpu_id);
914
		}
E
Eddie Dong 已提交
915 916 917
		break;

	case APIC_DM_STARTUP:
918 919
		apic_debug("SIPI to vcpu %d vector 0x%02x\n",
			   vcpu->vcpu_id, vector);
920 921 922 923 924 925 926
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
E
Eddie Dong 已提交
927 928
		break;

929 930 931 932 933 934 935 936
	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

E
Eddie Dong 已提交
937 938 939 940 941 942 943 944
	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}

945
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
946
{
947
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
948 949
}

950 951
static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
952
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
953 954
}

955 956
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
957 958 959 960 961
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;
962

963 964 965 966 967
	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
968
	}
969 970 971 972 973 974 975

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
976 977
}

978
static int apic_set_eoi(struct kvm_lapic *apic)
E
Eddie Dong 已提交
979 980
{
	int vector = apic_find_highest_isr(apic);
981 982 983

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every write to the EOI register has a corresponding ISR bit set;
	 * one example is when the kernel checks the timer during setup_IO_APIC.
	 */
	if (vector == -1)
989
		return vector;
E
Eddie Dong 已提交
990

M
Michael S. Tsirkin 已提交
991
	apic_clear_isr(vector, apic);
E
Eddie Dong 已提交
992 993
	apic_update_ppr(apic);

994 995 996
	if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

997
	kvm_ioapic_send_eoi(apic, vector);
998
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
999
	return vector;
E
Eddie Dong 已提交
1000 1001
}

1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016
/*
 * this interface assumes a trap-like exit, which has already finished
 * desired side effect including vISR and vPPR update.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

E
Eddie Dong 已提交
1017 1018
static void apic_send_ipi(struct kvm_lapic *apic)
{
1019 1020
	u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR);
	u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2);
1021
	struct kvm_lapic_irq irq;
E
Eddie Dong 已提交
1022

1023 1024 1025
	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
1026
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1027 1028
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
1029
	irq.msi_redir_hint = false;
G
Gleb Natapov 已提交
1030 1031 1032 1033
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
E
Eddie Dong 已提交
1034

1035 1036
	trace_kvm_apic_ipi(icr_low, irq.dest_id);

E
Eddie Dong 已提交
1037 1038
	apic_debug("icr_high 0x%x, icr_low 0x%x, "
		   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
1039 1040
		   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
		   "msi_redir_hint 0x%x\n",
G
Glauber Costa 已提交
1041
		   icr_high, icr_low, irq.shorthand, irq.dest_id,
1042
		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
1043
		   irq.vector, irq.msi_redir_hint);
1044

1045
	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
E
Eddie Dong 已提交
1046 1047 1048 1049
}

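/*
 * Derive the timer current-count register from the host hrtimer:
 * TMCCT = (remaining_ns mod period) / (APIC_BUS_CYCLE_NS * divide_count).
 */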
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
1050 1051
	ktime_t remaining;
	s64 ns;
1052
	u32 tmcct;
E
Eddie Dong 已提交
1053 1054 1055

	ASSERT(apic != NULL);

1056
	/* if initial count is 0, current count should also be 0 */
1057
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1058
		apic->lapic_timer.period == 0)
1059 1060
		return 0;

1061
	remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
1062 1063 1064
	if (ktime_to_ns(remaining) < 0)
		remaining = ktime_set(0, 0);

1065 1066 1067
	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	tmcct = div64_u64(ns,
			 (APIC_BUS_CYCLE_NS * apic->divide_count));
E
Eddie Dong 已提交
1068 1069 1070 1071

	return tmcct;
}

1072 1073 1074 1075 1076
static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

1077
	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1078
	run->tpr_access.rip = kvm_rip_read(vcpu);
1079 1080 1081 1082 1083 1084 1085 1086 1087
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}

E
Eddie Dong 已提交
1088 1089 1090 1091 1092 1093 1094 1095
static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
G
Gleb Natapov 已提交
1096 1097 1098 1099 1100 1101
	case APIC_ID:
		if (apic_x2apic_mode(apic))
			val = kvm_apic_id(apic);
		else
			val = kvm_apic_id(apic) << 24;
		break;
E
Eddie Dong 已提交
1102
	case APIC_ARBPRI:
1103
		apic_debug("Access APIC ARBPRI register which is for P6\n");
E
Eddie Dong 已提交
1104 1105 1106
		break;

	case APIC_TMCCT:	/* Timer CCR */
1107 1108 1109
		if (apic_lvtt_tscdeadline(apic))
			return 0;

E
Eddie Dong 已提交
1110 1111
		val = apic_get_tmcct(apic);
		break;
1112 1113
	case APIC_PROCPRI:
		apic_update_ppr(apic);
1114
		val = kvm_lapic_get_reg(apic, offset);
1115
		break;
1116 1117 1118
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		/* fall thru */
E
Eddie Dong 已提交
1119
	default:
1120
		val = kvm_lapic_get_reg(apic, offset);
E
Eddie Dong 已提交
1121 1122 1123 1124 1125 1126
		break;
	}

	return val;
}

G
Gregory Haskins 已提交
1127 1128 1129 1130 1131
static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}

1132
int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
G
Gleb Natapov 已提交
1133
		void *data)
E
Eddie Dong 已提交
1134 1135 1136
{
	unsigned char alignment = offset & 0xf;
	u32 result;
G
Guo Chao 已提交
1137
	/* this bitmask has a bit cleared for each reserved register */
G
Gleb Natapov 已提交
1138
	static const u64 rmask = 0x43ff01ffffffe70cULL;
E
Eddie Dong 已提交
1139 1140

	if ((alignment + len) > 4) {
1141 1142
		apic_debug("KVM_APIC_READ: alignment error %x %d\n",
			   offset, len);
G
Gleb Natapov 已提交
1143
		return 1;
E
Eddie Dong 已提交
1144
	}
G
Gleb Natapov 已提交
1145 1146

	if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
1147 1148
		apic_debug("KVM_APIC_READ: read reserved register %x\n",
			   offset);
G
Gleb Natapov 已提交
1149 1150 1151
		return 1;
	}

E
Eddie Dong 已提交
1152 1153
	result = __apic_read(apic, offset & ~0xf);

1154 1155
	trace_kvm_apic_read(offset, result);

E
Eddie Dong 已提交
1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166
	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1,2, or 4 instead\n", len);
		break;
	}
1167
	return 0;
E
Eddie Dong 已提交
1168
}
1169
EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
E
Eddie Dong 已提交
1170

G
Gleb Natapov 已提交
1171 1172
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
1173
	return kvm_apic_hw_enabled(apic) &&
G
Gleb Natapov 已提交
1174 1175 1176 1177
	    addr >= apic->base_address &&
	    addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

1178
static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
G
Gleb Natapov 已提交
1179 1180 1181 1182 1183 1184 1185 1186
			   gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

1187
	kvm_lapic_reg_read(apic, offset, len, data);
G
Gleb Natapov 已提交
1188 1189 1190 1191

	return 0;
}

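/*
 * Decode the divide configuration register: bits 0, 1 and 3 of APIC_TDCR
 * select the timer divisor as a power of two (the pattern 0b1011 means
 * divide by 1).
 */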
static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

1196
	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
E
Eddie Dong 已提交
1197 1198
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1199
	apic->divide_count = 0x1 << (tmp2 & 0x7);
E
Eddie Dong 已提交
1200 1201

	apic_debug("timer divide count is 0x%x\n",
G
Glauber Costa 已提交
1202
				   apic->divide_count);
E
Eddie Dong 已提交
1203 1204
}

1205 1206
static void apic_update_lvtt(struct kvm_lapic *apic)
{
1207
	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1208 1209 1210 1211 1212 1213 1214 1215
			apic->lapic_timer.timer_mode_mask;

	if (apic->lapic_timer.timer_mode != timer_mode) {
		apic->lapic_timer.timer_mode = timer_mode;
		hrtimer_cancel(&apic->lapic_timer.timer);
	}
}

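/*
 * Called when the emulated timer fires: mark a timer interrupt as pending,
 * wake the vCPU if it is blocked, and latch the expired TSC deadline for
 * wait_lapic_expire().
 */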
static void apic_timer_expired(struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
1219
	struct swait_queue_head *q = &vcpu->wq;
1220
	struct kvm_timer *ktimer = &apic->lapic_timer;
1221 1222 1223 1224 1225

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	atomic_inc(&apic->lapic_timer.pending);
1226
	kvm_set_pending_timer(vcpu);
1227

1228 1229
	if (swait_active(q))
		swake_up(q);
1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242

	if (apic_lvtt_tscdeadline(apic))
		ktimer->expired_tscdeadline = ktimer->tscdeadline;
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */

static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
1243
	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1244 1245 1246

	if (kvm_apic_hw_enabled(apic)) {
		int vec = reg & APIC_VECTOR_MASK;
1247
		void *bitmap = apic->regs + APIC_ISR;
1248

1249
		if (vcpu->arch.apicv_active)
1250 1251 1252 1253
			bitmap = apic->regs + APIC_IRR;

		if (apic_test_vector(vec, bitmap))
			return true;
1254 1255 1256 1257 1258 1259 1260 1261 1262
	}
	return false;
}

void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 guest_tsc, tsc_deadline;

1263
	if (!lapic_in_kernel(vcpu))
1264 1265 1266 1267 1268 1269 1270 1271 1272 1273
		return;

	if (apic->lapic_timer.expired_tscdeadline == 0)
		return;

	if (!lapic_timer_int_injected(vcpu))
		return;

	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
	apic->lapic_timer.expired_tscdeadline = 0;
1274
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1275
	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1276 1277 1278 1279

	/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
	if (guest_tsc < tsc_deadline)
		__delay(tsc_deadline - guest_tsc);
1280 1281
}

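/*
 * Arm a host hrtimer to fire when the guest TSC reaches the programmed
 * deadline, expiring lapic_timer_advance_ns early to hide injection latency;
 * if the deadline has already passed, expire the timer immediately.
 */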
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
	u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
	u64 ns = 0;
	ktime_t expire;
	struct kvm_vcpu *vcpu = apic->vcpu;
	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
	unsigned long flags;
	ktime_t now;

	if (unlikely(!tscdeadline || !this_tsc_khz))
		return;

	local_irq_save(flags);

	now = apic->lapic_timer.timer.base->get_time();
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	if (likely(tscdeadline > guest_tsc)) {
		ns = (tscdeadline - guest_tsc) * 1000000ULL;
		do_div(ns, this_tsc_khz);
		expire = ktime_add_ns(now, ns);
		expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
		hrtimer_start(&apic->lapic_timer.timer,
				expire, HRTIMER_MODE_ABS_PINNED);
	} else
		apic_timer_expired(apic);

	local_irq_restore(flags);
}

1312 1313 1314 1315 1316 1317
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);

1318 1319 1320 1321 1322 1323
static void cancel_hv_tscdeadline(struct kvm_lapic *apic)
{
	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
	apic->lapic_timer.hv_timer_in_use = false;
}

1324 1325 1326 1327 1328 1329
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	WARN_ON(swait_active(&vcpu->wq));
1330
	cancel_hv_tscdeadline(apic);
1331 1332 1333 1334
	apic_timer_expired(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355
static bool start_hv_tscdeadline(struct kvm_lapic *apic)
{
	u64 tscdeadline = apic->lapic_timer.tscdeadline;

	if (atomic_read(&apic->lapic_timer.pending) ||
		kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
		if (apic->lapic_timer.hv_timer_in_use)
			cancel_hv_tscdeadline(apic);
	} else {
		apic->lapic_timer.hv_timer_in_use = true;
		hrtimer_cancel(&apic->lapic_timer.timer);

		/* In case the sw timer triggered in the window */
		if (atomic_read(&apic->lapic_timer.pending))
			cancel_hv_tscdeadline(apic);
	}
	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
			apic->lapic_timer.hv_timer_in_use);
	return apic->lapic_timer.hv_timer_in_use;
}

1356 1357 1358 1359 1360 1361
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(apic->lapic_timer.hv_timer_in_use);

1362 1363
	if (apic_lvtt_tscdeadline(apic))
		start_hv_tscdeadline(apic);
1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	/* Possibly the TSC deadline timer is not enabled yet */
	if (!apic->lapic_timer.hv_timer_in_use)
		return;

1375
	cancel_hv_tscdeadline(apic);
1376 1377 1378 1379 1380 1381 1382 1383

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	start_sw_tscdeadline(apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);

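/*
 * (Re)arm the emulated timer for the current mode: a host hrtimer for
 * oneshot/periodic mode, or a hardware-assisted timer (when the backend
 * provides set_hv_timer) with an hrtimer fallback for TSC-deadline mode.
 */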
static void start_apic_timer(struct kvm_lapic *apic)
{
1386
	ktime_t now;
1387

1388
	atomic_set(&apic->lapic_timer.pending, 0);
1389

1390
	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
G
Guo Chao 已提交
1391
		/* lapic timer in oneshot or periodic mode */
1392
		now = apic->lapic_timer.timer.base->get_time();
1393
		apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413
			    * APIC_BUS_CYCLE_NS * apic->divide_count;

		if (!apic->lapic_timer.period)
			return;
		/*
		 * Do not allow the guest to program periodic timers with small
		 * interval, since the hrtimers are not throttled by the host
		 * scheduler.
		 */
		if (apic_lvtt_period(apic)) {
			s64 min_period = min_timer_period_us * 1000LL;

			if (apic->lapic_timer.period < min_period) {
				pr_info_ratelimited(
				    "kvm: vcpu %i: requested %lld ns "
				    "lapic timer period limited to %lld ns\n",
				    apic->vcpu->vcpu_id,
				    apic->lapic_timer.period, min_period);
				apic->lapic_timer.period = min_period;
			}
1414
		}
1415

1416 1417
		hrtimer_start(&apic->lapic_timer.timer,
			      ktime_add_ns(now, apic->lapic_timer.period),
1418
			      HRTIMER_MODE_ABS_PINNED);
E
Eddie Dong 已提交
1419

1420
		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
E
Eddie Dong 已提交
1421 1422
			   PRIx64 ", "
			   "timer initial count 0x%x, period %lldns, "
1423
			   "expire @ 0x%016" PRIx64 ".\n", __func__,
E
Eddie Dong 已提交
1424
			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
1425
			   kvm_lapic_get_reg(apic, APIC_TMICT),
1426
			   apic->lapic_timer.period,
E
Eddie Dong 已提交
1427
			   ktime_to_ns(ktime_add_ns(now,
1428
					apic->lapic_timer.period)));
1429
	} else if (apic_lvtt_tscdeadline(apic)) {
1430
		if (!(kvm_x86_ops->set_hv_timer && start_hv_tscdeadline(apic)))
1431
			start_sw_tscdeadline(apic);
1432
	}
E
Eddie Dong 已提交
1433 1434
}

1435 1436
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
1437
	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
1438

1439 1440 1441
	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode) {
1442 1443
			apic_debug("Receive NMI setting on APIC_LVT0 "
				   "for cpu %d\n", apic->vcpu->vcpu_id);
1444
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1445 1446 1447
		} else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	}
1448 1449
}

1450
int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
E
G
Gleb Natapov 已提交
1452
	int ret = 0;
E
Eddie Dong 已提交
1453

G
Gleb Natapov 已提交
1454
	trace_kvm_apic_write(reg, val);
E
Eddie Dong 已提交
1455

G
Gleb Natapov 已提交
1456
	switch (reg) {
E
Eddie Dong 已提交
1457
	case APIC_ID:		/* Local APIC ID */
G
Gleb Natapov 已提交
1458
		if (!apic_x2apic_mode(apic))
1459
			kvm_apic_set_id(apic, val >> 24);
G
Gleb Natapov 已提交
1460 1461
		else
			ret = 1;
E
Eddie Dong 已提交
1462 1463 1464
		break;

	case APIC_TASKPRI:
1465
		report_tpr_access(apic, true);
E
Eddie Dong 已提交
1466 1467 1468 1469 1470 1471 1472 1473
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
G
Gleb Natapov 已提交
1474
		if (!apic_x2apic_mode(apic))
1475
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
G
Gleb Natapov 已提交
1476 1477
		else
			ret = 1;
E
Eddie Dong 已提交
1478 1479 1480
		break;

	case APIC_DFR:
1481
		if (!apic_x2apic_mode(apic)) {
1482
			kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
1483 1484
			recalculate_apic_map(apic->vcpu->kvm);
		} else
G
Gleb Natapov 已提交
1485
			ret = 1;
E
Eddie Dong 已提交
1486 1487
		break;

1488 1489
	case APIC_SPIV: {
		u32 mask = 0x3ff;
1490
		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
1491
			mask |= APIC_SPIV_DIRECTED_EOI;
1492
		apic_set_spiv(apic, val & mask);
E
Eddie Dong 已提交
1493 1494 1495 1496
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;
			u32 lvt_val;

1497
			for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
1498
				lvt_val = kvm_lapic_get_reg(apic,
E
Eddie Dong 已提交
1499
						       APIC_LVTT + 0x10 * i);
1500
				kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
E
Eddie Dong 已提交
1501 1502
					     lvt_val | APIC_LVT_MASKED);
			}
1503
			apic_update_lvtt(apic);
1504
			atomic_set(&apic->lapic_timer.pending, 0);
E
Eddie Dong 已提交
1505 1506 1507

		}
		break;
1508
	}
E
Eddie Dong 已提交
1509 1510
	case APIC_ICR:
		/* No delay here, so we always clear the pending bit */
1511
		kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
E
Eddie Dong 已提交
1512 1513 1514 1515
		apic_send_ipi(apic);
		break;

	case APIC_ICR2:
G
Gleb Natapov 已提交
1516 1517
		if (!apic_x2apic_mode(apic))
			val &= 0xff000000;
1518
		kvm_lapic_set_reg(apic, APIC_ICR2, val);
E
Eddie Dong 已提交
1519 1520
		break;

1521
	case APIC_LVT0:
1522
		apic_manage_nmi_watchdog(apic, val);
E
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR:
		/* TODO: Check vector */
1528
		if (!kvm_apic_sw_enabled(apic))
E
Eddie Dong 已提交
1529 1530
			val |= APIC_LVT_MASKED;

G
Gleb Natapov 已提交
1531
		val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
1532
		kvm_lapic_set_reg(apic, reg, val);
E
Eddie Dong 已提交
1533 1534 1535

		break;

1536
	case APIC_LVTT:
1537
		if (!kvm_apic_sw_enabled(apic))
1538 1539
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
1540
		kvm_lapic_set_reg(apic, APIC_LVTT, val);
1541
		apic_update_lvtt(apic);
1542 1543
		break;

E
Eddie Dong 已提交
1544
	case APIC_TMICT:
1545 1546 1547
		if (apic_lvtt_tscdeadline(apic))
			break;

1548
		hrtimer_cancel(&apic->lapic_timer.timer);
1549
		kvm_lapic_set_reg(apic, APIC_TMICT, val);
E
Eddie Dong 已提交
1550
		start_apic_timer(apic);
G
Gleb Natapov 已提交
1551
		break;
E
Eddie Dong 已提交
1552 1553 1554

	case APIC_TDCR:
		if (val & 4)
1555
			apic_debug("KVM_WRITE:TDCR %x\n", val);
1556
		kvm_lapic_set_reg(apic, APIC_TDCR, val);
E
Eddie Dong 已提交
1557 1558 1559
		update_divide_count(apic);
		break;

G
Gleb Natapov 已提交
1560 1561
	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0) {
1562
			apic_debug("KVM_WRITE:ESR not zero %x\n", val);
G
Gleb Natapov 已提交
1563 1564 1565 1566 1567 1568
			ret = 1;
		}
		break;

	case APIC_SELF_IPI:
		if (apic_x2apic_mode(apic)) {
1569
			kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
G
Gleb Natapov 已提交
1570 1571 1572
		} else
			ret = 1;
		break;
E
Eddie Dong 已提交
1573
	default:
G
Gleb Natapov 已提交
1574
		ret = 1;
E
Eddie Dong 已提交
1575 1576
		break;
	}
G
Gleb Natapov 已提交
1577 1578 1579 1580
	if (ret)
		apic_debug("Local APIC Write to read-only register %x\n", reg);
	return ret;
}
1581
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
G
Gleb Natapov 已提交
1582

1583
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
G
Gleb Natapov 已提交
1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600
			    gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	/*
	 * APIC register must be aligned on 128-bits boundary.
	 * 32/64/128 bits registers must be accessed thru 32 bits.
	 * Refer SDM 8.4.1
	 */
	if (len != 4 || (offset & 0xf)) {
		/* Don't shout loud, $infamous_os would cause only noise. */
		apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
1601
		return 0;
G
Gleb Natapov 已提交
1602 1603 1604 1605 1606 1607 1608 1609 1610
	}

	val = *(u32*)data;

	/* too common printing */
	if (offset != APIC_EOI)
		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
			   "0x%x\n", __func__, offset, len, val);

1611
	kvm_lapic_reg_write(apic, offset & 0xff0, val);
G
Gleb Natapov 已提交
1612

1613
	return 0;
E
Eddie Dong 已提交
1614 1615
}

1616 1617
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
1618
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
1619 1620 1621
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

1622 1623 1624 1625 1626 1627 1628 1629
/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	u32 val = 0;

	/* hw has done the conditional check and inst decode */
	offset &= 0xff0;

1630
	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
1631 1632

	/* TODO: optimize to just emulate side effect w/o one more write */
1633
	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
1634 1635 1636
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);

1637
void kvm_free_lapic(struct kvm_vcpu *vcpu)
E
Eddie Dong 已提交
1638
{
1639 1640
	struct kvm_lapic *apic = vcpu->arch.apic;

1641
	if (!vcpu->arch.apic)
E
Eddie Dong 已提交
1642 1643
		return;

1644
	hrtimer_cancel(&apic->lapic_timer.timer);
E
Eddie Dong 已提交
1645

1646 1647 1648
	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_key_slow_dec_deferred(&apic_hw_disabled);

1649
	if (!apic->sw_enabled)
1650
		static_key_slow_dec_deferred(&apic_sw_disabled);
E
Eddie Dong 已提交
1651

1652 1653 1654 1655
	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
E
Eddie Dong 已提交
1656 1657 1658 1659 1660 1661 1662 1663
}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */

1664 1665 1666 1667
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

1668
	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
1669
			apic_lvtt_period(apic))
1670 1671 1672 1673 1674 1675 1676 1677 1678
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

1679
	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
1680
			apic_lvtt_period(apic))
1681 1682 1683 1684 1685 1686 1687
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}

E
Eddie Dong 已提交
1688 1689
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
1690
	struct kvm_lapic *apic = vcpu->arch.apic;
E
Eddie Dong 已提交
1691

A
Avi Kivity 已提交
1692
	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
1693
		     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
E
Eddie Dong 已提交
1694 1695 1696 1697 1698 1699
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

1700
	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
E
Eddie Dong 已提交
1701 1702 1703 1704 1705 1706

	return (tpr & 0xf0) >> 4;
}

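/*
 * Handle a write to the IA32_APIC_BASE MSR: toggle the hardware-enable
 * jump label, switch between xAPIC and x2APIC mode, and refresh the MMIO
 * base address (base relocation is not supported).
 */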
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
1707
	u64 old_value = vcpu->arch.apic_base;
1708
	struct kvm_lapic *apic = vcpu->arch.apic;
E
Eddie Dong 已提交
1709 1710 1711

	if (!apic) {
		value |= MSR_IA32_APICBASE_BSP;
1712
		vcpu->arch.apic_base = value;
E
Eddie Dong 已提交
1713 1714
		return;
	}
1715

1716 1717
	vcpu->arch.apic_base = value;

1718
	/* update jump label if enable bit changes */
1719
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
1720 1721 1722 1723
		if (value & MSR_IA32_APICBASE_ENABLE)
			static_key_slow_dec_deferred(&apic_hw_disabled);
		else
			static_key_slow_inc(&apic_hw_disabled.key);
1724
		recalculate_apic_map(vcpu->kvm);
1725 1726
	}

1727 1728
	if ((old_value ^ value) & X2APIC_ENABLE) {
		if (value & X2APIC_ENABLE) {
1729
			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
1730 1731 1732
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
		} else
			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
G
Gleb Natapov 已提交
1733
	}
1734

1735
	apic->base_address = apic->vcpu->arch.apic_base &
E
Eddie Dong 已提交
1736 1737
			     MSR_IA32_APICBASE_BASE;

1738 1739 1740 1741
	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
		pr_warn_once("APIC base relocation is unsupported by KVM");

E
Eddie Dong 已提交
1742 1743
	/* with FSB delivery interrupt, we can restart APIC functionality */
	apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
1744
		   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
E
Eddie Dong 已提交
1745 1746 1747

}

1748
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
E
Eddie Dong 已提交
1749 1750 1751 1752
{
	struct kvm_lapic *apic;
	int i;

1753
	apic_debug("%s\n", __func__);
E
Eddie Dong 已提交
1754 1755

	ASSERT(vcpu);
1756
	apic = vcpu->arch.apic;
E
Eddie Dong 已提交
1757 1758 1759
	ASSERT(apic != NULL);

	/* Stop the timer in case it's a reset to an active apic */
1760
	hrtimer_cancel(&apic->lapic_timer.timer);
E
Eddie Dong 已提交
1761

1762 1763
	if (!init_event)
		kvm_apic_set_id(apic, vcpu->vcpu_id);
1764
	kvm_apic_set_version(apic->vcpu);
E
Eddie Dong 已提交
1765

1766 1767
	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
1768
	apic_update_lvtt(apic);
1769
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
1770
		kvm_lapic_set_reg(apic, APIC_LVT0,
1771
			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
1772
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
E
Eddie Dong 已提交
1773

1774
	kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
1775
	apic_set_spiv(apic, 0xff);
1776
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
1777 1778
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
1779 1780 1781 1782 1783
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
E
Eddie Dong 已提交
1784
	for (i = 0; i < 8; i++) {
1785 1786 1787
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
E
Eddie Dong 已提交
1788
	}
1789 1790
	apic->irr_pending = vcpu->arch.apicv_active;
	apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);
	if (kvm_vcpu_is_bsp(vcpu))
		kvm_lapic_set_base(vcpu,
				vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	apic_debug("%s: vcpu=%p, id=%d, base_msr="
		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
		   vcpu, kvm_apic_id(apic),
		   vcpu->arch.apic_base, apic->base_address);
}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};

static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic);
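	/*
	 * In periodic mode push the expiry forward by one period and let
	 * the hrtimer fire again; one-shot and TSC-deadline timers are not
	 * re-armed here.
	 */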
	if (lapic_is_periodic(apic)) {
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);
	apic_debug("apic_init %d\n", vcpu->vcpu_id);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED);
	apic->lapic_timer.timer.function = apic_timer_fn;

	/*
	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
	 * thinking that APIC state has changed.
	 */
	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
	kvm_lapic_set_base(vcpu,
			APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);

	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_lapic_reset(vcpu, false);
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
nomem:
	return -ENOMEM;
}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int highest_irr;

	if (!apic_enabled(apic))
		return -1;

	apic_update_ppr(apic);
	highest_irr = apic_find_highest_irr(apic);
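	/*
	 * Only deliver if the vector's priority class (upper nibble) is
	 * above the current processor priority (PPR).
	 */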
	if ((highest_irr == -1) ||
	    ((highest_irr & 0xF0) <= kvm_lapic_get_reg(apic, APIC_PROCPRI)))
		return -1;
	return highest_irr;
}

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		r = 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		r = 1;
	return r;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_local_deliver(apic, APIC_LVTT);
		if (apic_lvtt_tscdeadline(apic))
			apic->lapic_timer.tscdeadline = 0;
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_set_isr(vector, apic);
	apic_update_ppr(apic);
	apic_clear_irr(vector, apic);
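	/*
	 * Vectors for which the Hyper-V SynIC requested auto-EOI are
	 * completed here on the guest's behalf, so the guest never has to
	 * write an EOI for them.
	 */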
	if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
		apic_clear_isr(vector, apic);
		apic_update_ppr(apic);
	}

	return vector;
}

void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	/* call kvm_apic_set_id() to put apic into apic_map */
	kvm_apic_set_id(apic, kvm_apic_id(apic));
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	start_apic_timer(apic);
	apic->irr_pending = true;
	apic->isr_count = vcpu->arch.apicv_active ?
				1 : count_vectors(apic->regs + APIC_ISR);
	apic->highest_isr_cache = -1;
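	/*
	 * With APIC virtualization active, push the restored IRR/ISR state
	 * into the hardware so the CPU's view matches the migrated
	 * registers.
	 */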
	if (vcpu->arch.apicv_active) {
		if (kvm_x86_ops->apicv_post_state_restore)
			kvm_x86_ops->apicv_post_state_restore(vcpu);
		kvm_x86_ops->hwapic_irr_update(vcpu,
				apic_find_highest_irr(apic));
		kvm_x86_ops->hwapic_isr_update(vcpu,
				apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
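	/*
	 * If the timer was queued, re-arm it with the same expiry so the
	 * pinned hrtimer is moved to the CPU the vCPU now runs on.
	 */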
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether guest triggered PV EOI since the
 * last entry. If yes, set EOI on guests's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 * 	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 * 	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 * 	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
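	/*
	 * Pack TPR, the in-service priority class and the highest pending
	 * vector into the 32-bit snapshot written to the guest's vAPIC
	 * area.
	 */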
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
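	/*
	 * x2APIC exposes the APIC registers as MSRs starting at
	 * APIC_BASE_MSR (0x800); shifting the index left by 4 recovers the
	 * xAPIC MMIO offset, e.g. MSR 0x80d maps to offset 0xd0 (APIC_LDR).
	 */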
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_ICR2)
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR || reg == APIC_ICR2) {
		apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
			   reg);
		return 1;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu))
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!lapic_in_kernel(vcpu))
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
{
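	/*
	 * MSR_KVM_PV_EOI_EN layout: bit 0 (KVM_MSR_ENABLED) turns the
	 * feature on, the remaining bits hold the guest address of the
	 * one-byte PV EOI flag, which must be 4-byte aligned.
	 */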
	u64 addr = data & ~KVM_MSR_ENABLED;
	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;
	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
					 addr, sizeof(u8));
}

void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	unsigned long pe;

	if (!lapic_in_kernel(vcpu) || !apic->pending_events)
		return;

	/*
	 * INITs are latched while in SMM.  Because an SMM CPU cannot
	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
	 * and delay processing of INIT until the next RSM.
	 */
	if (is_smm(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return;
	}

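	/*
	 * Consume all pending events atomically; anything that arrives
	 * after the xchg() will be handled on the next call.
	 */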
	pe = xchg(&apic->pending_events, 0);
	if (test_bit(KVM_APIC_INIT, &pe)) {
		kvm_lapic_reset(vcpu, true);
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		/* evaluate pending_events before reading the vector */
		smp_rmb();
		sipi_vector = apic->sipi_vector;
		apic_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, sipi_vector);
		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}

void kvm_lapic_init(void)
{
	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
}