/*
 *  Copyright (C) 2001  MandrakeSoft S.A.
 *  Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *    MandrakeSoft S.A.
 *    43, rue d'Aboukir
 *    75002 Paris - France
 *    http://www.linux-mandrake.com/
 *    http://www.mandrakesoft.com/
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *  Yunhong Jiang <yunhong.jiang@intel.com>
 *  Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *  Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

#if 0
#define ioapic_debug(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif
static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
		bool line_status);

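/* Read the indirectly addressed register currently selected by IOREGSEL. */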
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content;

			if (redir_index < IOAPIC_NUM_PINS)
				redir_content =
					ioapic->redirtbl[redir_index].bits;
			else
				redir_content = ~0ULL;

			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

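/*
 * RTC (RTC_GSI) EOI tracking: dest_map records which vCPUs were targeted
 * by the last RTC interrupt and still owe an EOI, and pending_eoi counts
 * them so that further RTC interrupts can be reported as coalesced until
 * every EOI has arrived.
 */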
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}

static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
				e->fields.dest_mode))
		return;

	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
	    __rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
{
	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true; /* coalesced */

	return false;
}

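/*
 * Assert or deassert an IOAPIC pin.  A return value of 0 means the
 * interrupt was coalesced (dropped); a deassert returns 1 and an assert
 * returns the result of ioapic_service().
 */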
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
		int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has been already ack-ed via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge)
		ioapic->irr_delivered &= ~mask;
	if ((edge && old_irr == ioapic->irr) ||
	    (!edge && entry.fields.remote_irr)) {
		ret = 0;
		goto out;
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}

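/* Re-inject every interrupt latched in @irr after the IOAPIC state is restored. */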
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}


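/*
 * Recompute the bitmap of vectors present in the redirection table; it is
 * consulted by kvm_ioapic_handles_vector() to filter EOI broadcasts.
 */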
static void update_handled_vectors(struct kvm_ioapic *ioapic)
{
	DECLARE_BITMAP(handled_vectors, 256);
	int i;

	memset(handled_vectors, 0, sizeof(handled_vectors));
	for (i = 0; i < IOAPIC_NUM_PINS; ++i)
		__set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
	memcpy(ioapic->handled_vectors, handled_vectors,
	       sizeof(handled_vectors));
	smp_wmb();
}

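/*
 * For one vCPU, collect the vectors whose EOI must be trapped
 * (eoi_exit_bitmap) and the level-triggered ones (tmr) from the current
 * redirection table; called when a scan-ioapic request is processed.
 */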
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
			u32 *tmr)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);
	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			if (kvm_apic_match_dest(vcpu, NULL, 0,
				e->fields.dest_id, e->fields.dest_mode)) {
				__set_bit(e->fields.vector,
					(unsigned long *)eoi_exit_bitmap);
				if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG)
					__set_bit(e->fields.vector,
						(unsigned long *)tmr);
			}
		}
	}
	spin_unlock(&ioapic->lock);
}

void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;
	kvm_make_scan_ioapic_request(kvm);
}

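/* Write the indirectly addressed register currently selected by IOREGSEL. */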
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
			e->fields.remote_irr = 0;
		}
		update_handled_vectors(ioapic);
		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index, false);
		kvm_vcpu_request_scan_ioapic(ioapic->kvm);
		break;
	}
}

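/*
 * Deliver the interrupt programmed in redirection entry @irq to the local
 * APIC(s).  Returns -1 if the entry is masked, otherwise the result of
 * kvm_irq_delivery_to_apic().
 */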
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask)
		return -1;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi cannot ever become negative (see
		 * rtc_status_pending_eoi_check_valid) and the caller
		 * ensures that it is only called if it is >= zero, namely
		 * if rtc_irq_check_coalesced returns false.
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
				ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}

int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
		       int level, bool line_status)
{
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

	spin_unlock(&ioapic->lock);

	return ret;
}

void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
	int i;

	spin_lock(&ioapic->lock);
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &ioapic->irq_states[i]);
	spin_unlock(&ioapic->lock);
}

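/*
 * Delayed re-injection of still-pending level-triggered interrupts,
 * scheduled by __kvm_ioapic_update_eoi() when an EOI storm is detected.
 */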
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000

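/*
 * Handle an EOI broadcast for @vector: run the ack notifiers, clear
 * remote_irr and, for level-triggered pins that are still asserted,
 * re-deliver the interrupt (rate-limited via eoi_inject when the guest
 * keeps EOI-ing without servicing the source).
 */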
static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;

		if (i == RTC_GSI)
			rtc_irq_eoi(ioapic, vcpu);
		/*
		 * We are dropping the lock while calling ack notifiers
		 * because ack notifier callbacks for assigned devices call
		 * into the IOAPIC recursively.  Since remote_irr is cleared
		 * only after the notifiers run, a vector delivered while the
		 * lock is dropped is put into irr and delivered after the
		 * ack notifier returns.
		 */
		spin_unlock(&ioapic->lock);
		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
		spin_lock(&ioapic->lock);

		if (trigger_mode != IOAPIC_LEVEL_TRIG)
			continue;

		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
		ent->fields.remote_irr = 0;
		if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
			++ioapic->irq_eoi[i];
			if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
				/*
				 * Real hardware does not deliver the interrupt
				 * immediately during eoi broadcast, and this
				 * lets a buggy guest make slow progress
				 * even if it does not correctly handle a
				 * level-triggered interrupt.  Emulate this
				 * behavior if we detect an interrupt storm.
				 */
				schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
				ioapic->irq_eoi[i] = 0;
				trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
			} else {
				ioapic_service(ioapic, i, false);
			}
		} else {
			ioapic->irq_eoi[i] = 0;
		}
	}
}

bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	smp_rmb();
	return test_bit(vector, ioapic->handled_vectors);
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
	spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
			    void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("addr %lx\n", (unsigned long)addr);
	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
			     const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
		     (void *)addr, len, val);
	ASSERT(!(addr & 0xf));	/* check alignment */

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8  *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
	update_handled_vectors(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read     = ioapic_mmio_read,
	.write    = ioapic_mmio_write,
};

int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}

int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	if (!ioapic)
		return -EINVAL;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
	return 0;
}

int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	if (!ioapic)
		return -EINVAL;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	update_handled_vectors(ioapic);
	kvm_vcpu_request_scan_ioapic(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
	return 0;
}