ioapic.c
/*
 *  Copyright (C) 2001  MandrakeSoft S.A.
 *  Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *    MandrakeSoft S.A.
 *    43, rue d'Aboukir
 *    75002 Paris - France
 *    http://www.linux-mandrake.com/
 *    http://www.mandrakesoft.com/
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *  Yunhong Jiang <yunhong.jiang@intel.com>
 *  Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *  Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

#if 0
#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif
static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
		bool line_status);

static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content;

			if (redir_index < IOAPIC_NUM_PINS)
				redir_content =
					ioapic->redirtbl[redir_index].bits;
			else
				redir_content = ~0ULL;

			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}

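/*
 * Resync the RTC EOI-tracking state for one vCPU: if the vCPU is a
 * destination of the RTC pin and still has that vector pending in its
 * local APIC, record it in dest_map and bump pending_eoi; otherwise
 * drop the vCPU from the tracking state.
 */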
static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, 0,	e->fields.dest_id,
				e->fields.dest_mode))
		return;

	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
	    __rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
{
	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true; /* coalesced */

	return false;
}

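/*
 * Apply a new level to an IOAPIC pin.  A return value of 0 means the
 * interrupt was coalesced, i.e. dropped because a previous assertion
 * has not been serviced yet.
 */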
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
		int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has been already ack-ed via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge)
		ioapic->irr_delivered &= ~mask;
	if ((edge && old_irr == ioapic->irr) ||
	    (!edge && entry.fields.remote_irr)) {
		ret = 0;
		goto out;
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}

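/*
 * Re-deliver every interrupt that is pending in the IRR image restored
 * by userspace, then rebuild the RTC EOI-tracking state to match.
 */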
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}


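/*
 * Recompute the bitmap of vectors currently programmed into the
 * redirection table, so that EOI broadcasts for vectors the IOAPIC is
 * not using can be filtered out cheaply.
 */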
static void update_handled_vectors(struct kvm_ioapic *ioapic)
{
	DECLARE_BITMAP(handled_vectors, 256);
	int i;

	memset(handled_vectors, 0, sizeof(handled_vectors));
	for (i = 0; i < IOAPIC_NUM_PINS; ++i)
		__set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
	memcpy(ioapic->handled_vectors, handled_vectors,
	       sizeof(handled_vectors));
	smp_wmb();
}

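/*
 * Build this vCPU's EOI-exit bitmap: the set of vectors whose EOI must
 * be forwarded to the IOAPIC (level-triggered pins, pins with ack
 * notifiers, and the RTC pin) when the vCPU is a destination.
 */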
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);
	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			if (kvm_apic_match_dest(vcpu, NULL, 0,
				e->fields.dest_id, e->fields.dest_mode))
				__set_bit(e->fields.vector,
					(unsigned long *)eoi_exit_bitmap);
		}
	}
	spin_unlock(&ioapic->lock);
}

void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;
	kvm_make_scan_ioapic_request(kvm);
}

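/*
 * Write the register currently selected by IOREGSEL.  Updating a
 * redirection entry may unmask a pending level-triggered interrupt, in
 * which case it is serviced immediately, and always triggers a rescan
 * of the EOI-exit bitmaps.
 */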
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
			e->fields.remote_irr = 0;
		}
		update_handled_vectors(ioapic);
		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index, false);
		kvm_vcpu_request_scan_ioapic(ioapic->kvm);
		break;
	}
}

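/*
 * Inject the interrupt programmed into redirection entry @irq into the
 * local APIC(s) it targets.  Returns -1 if the pin is masked, otherwise
 * the result of the APIC delivery.
 */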
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask)
		return -1;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi cannot ever become negative (see
		 * rtc_status_pending_eoi_check_valid), and this path is
		 * only taken when rtc_irq_check_coalesced() has returned
		 * false, i.e. pending_eoi is not positive.  It must
		 * therefore be exactly zero here.
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
				ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}

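/*
 * Entry point used by the interrupt-routing code: fold the per-source
 * line states into one effective level for the pin, then update the pin
 * under the IOAPIC lock.
 */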
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
		       int level, bool line_status)
{
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

	spin_unlock(&ioapic->lock);

	return ret;
}

void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
	int i;

	spin_lock(&ioapic->lock);
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &ioapic->irq_states[i]);
	spin_unlock(&ioapic->lock);
}

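/*
 * Delayed work used for interrupt-storm mitigation: re-deliver any
 * level-triggered interrupt that is still pending in IRR and is not
 * waiting for an EOI (remote_irr clear).
 */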
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000

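/*
 * Handle an EOI broadcast for @vector: run the ack notifiers for every
 * matching pin and, for level-triggered pins, clear remote_irr and
 * re-deliver the interrupt if the line is still asserted, throttling
 * re-delivery if an interrupt storm is detected.
 */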
static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
	int i;
	struct kvm_lapic *apic = vcpu->arch.apic;

	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;

		if (i == RTC_GSI)
			rtc_irq_eoi(ioapic, vcpu);
		/*
		 * We are dropping lock while calling ack notifiers because ack
		 * notifier callbacks for assigned devices call into IOAPIC
		 * recursively. Since remote_irr is cleared only after call
		 * to notifiers if the same vector will be delivered while lock
		 * is dropped it will be put into irr and will be delivered
		 * after ack notifier returns.
		 */
		spin_unlock(&ioapic->lock);
		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
		spin_lock(&ioapic->lock);

		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
		    kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
			continue;

		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
		ent->fields.remote_irr = 0;
		if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
			++ioapic->irq_eoi[i];
			if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
				/*
				 * Real hardware does not deliver the interrupt
				 * immediately during eoi broadcast, and this
				 * lets a buggy guest make slow progress
				 * even if it does not correctly handle a
				 * level-triggered interrupt.  Emulate this
				 * behavior if we detect an interrupt storm.
				 */
				schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
				ioapic->irq_eoi[i] = 0;
				trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
			} else {
				ioapic_service(ioapic, i, false);
			}
		} else {
			ioapic->irq_eoi[i] = 0;
		}
	}
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
	spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
				gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("addr %lx\n", (unsigned long)addr);
	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
				 gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
		     (void*)addr, len, val);
	ASSERT(!(addr & 0xf));	/* check alignment */

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8  *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
	update_handled_vectors(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read     = ioapic_mmio_read,
	.write    = ioapic_mmio_write,
};

int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}

int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	if (!ioapic)
		return -EINVAL;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
	return 0;
}

int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	if (!ioapic)
		return -EINVAL;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	update_handled_vectors(ioapic);
	kvm_vcpu_request_scan_ioapic(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
	return 0;
}