/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 */

#include <linux/kvm_host.h>

#include <asm/msidef.h>

#include "irq.h"

#include "ioapic.h"

/*
 * Routing callback for PIC pins.  Slave-PIC routing entries carry pin
 * numbers already offset by 8 (see setup_routing_entry()), so the pin
 * can be passed through unchanged.
 */
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int level)
{
#ifdef CONFIG_X86
	struct kvm_pic *pic = pic_irqchip(kvm);

	return kvm_pic_set_irq(pic, e->irqchip.pin, level);
#else
	return -1;	/* no PIC emulation on this architecture */
#endif
}

40 41
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int level)
42
{
43
	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
44 45
}

/*
 * Compute the set of destination vcpus for @entry and return it in
 * @deliver_bitmask (one bit per slot of kvm->vcpus[], KVM_MAX_VCPUS
 * bits total).  Used for IOAPIC redirection entries and, via
 * kvm_set_msi(), for MSI messages packed into the same union.
 *
 * Lowest-priority delivery collapses the bitmap to the single vcpu
 * chosen by kvm_get_lowest_prio_vcpu(); unsupported delivery modes
 * leave the bitmap empty.
 */
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask)
{
	int i;
	struct kvm *kvm = ioapic->kvm;
	struct kvm_vcpu *vcpu;

	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);

	if (entry->fields.dest_mode == 0) {	/* Physical mode. */
		if (entry->fields.dest_id == 0xFF) {	/* Broadcast. */
			/* Target every vcpu that has a local APIC. */
			for (i = 0; i < KVM_MAX_VCPUS; ++i)
				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
					__set_bit(i, deliver_bitmask);
			/* Lowest priority shouldn't combine with broadcast */
			if (entry->fields.delivery_mode ==
			    IOAPIC_LOWEST_PRIORITY && printk_ratelimit())
				printk(KERN_INFO "kvm: apic: phys broadcast "
						  "and lowest prio\n");
			return;
		}
		/* Physical unicast: at most one vcpu matches the APIC id. */
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
			if (kvm_apic_match_physical_addr(vcpu->arch.apic,
					entry->fields.dest_id)) {
				if (vcpu->arch.apic)
					__set_bit(i, deliver_bitmask);
				break;
			}
		}
	} else if (entry->fields.dest_id != 0) /* Logical mode, MDA non-zero. */
		/* Logical mode may match several vcpus; collect them all. */
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
			if (vcpu->arch.apic &&
			    kvm_apic_match_logical_addr(vcpu->arch.apic,
					entry->fields.dest_id))
				__set_bit(i, deliver_bitmask);
		}

	switch (entry->fields.delivery_mode) {
	case IOAPIC_LOWEST_PRIORITY:
		/* Select one in deliver_bitmask */
		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
				entry->fields.vector, deliver_bitmask);
		/* Collapse the candidate set to the single winner. */
		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
		if (!vcpu)
			return;
		__set_bit(vcpu->vcpu_id, deliver_bitmask);
		break;
	case IOAPIC_FIXED:
	case IOAPIC_NMI:
		break;
	default:
		if (printk_ratelimit())
			printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
				entry->fields.delivery_mode);
		/* Unknown mode: deliver to nobody. */
		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
	}
}

/*
 * Routing callback for MSI entries: translate the MSI address/data
 * words into an IOAPIC-style redirect entry and reuse the common
 * destination-matching code, then inject into every matched vcpu.
 *
 * Returns -1 if nothing was delivered, otherwise the accumulated
 * per-vcpu delivery count (0 means coalesced).  @level is unused:
 * MSIs are edge-triggered messages.
 */
static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		       struct kvm *kvm, int level)
{
	int vcpu_id, r = -1;
	struct kvm_vcpu *vcpu;
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	union kvm_ioapic_redirect_entry entry;
	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);

	BUG_ON(!ioapic);

	/* Unpack dest id / vector / mode bits from the MSI message into
	 * the shared redirect-entry layout. */
	entry.bits = 0;
	entry.fields.dest_id = (e->msi.address_lo &
			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
	entry.fields.vector = (e->msi.data &
			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
	entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
			(unsigned long *)&e->msi.address_lo);
	entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
			(unsigned long *)&e->msi.data);
	entry.fields.delivery_mode = test_bit(
			MSI_DATA_DELIVERY_MODE_SHIFT,
			(unsigned long *)&e->msi.data);

	/* TODO Deal with RH bit of MSI message address */

	kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);

	/* Empty bitmap: the message matched no vcpu at all. */
	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
		printk(KERN_WARNING "kvm: no destination for MSI delivery!");
		return -1;
	}
	/* Inject into each matched vcpu, clearing its bit as we go and
	 * summing the per-vcpu results. */
	while ((vcpu_id = find_first_bit(deliver_bitmask,
					KVM_MAX_VCPUS)) < KVM_MAX_VCPUS) {
		__clear_bit(vcpu_id, deliver_bitmask);
		vcpu = ioapic->kvm->vcpus[vcpu_id];
		if (vcpu) {
			if (r < 0)
				r = 0;	/* first hit: stop reporting "ignored" */
			r += kvm_apic_set_irq(vcpu, entry.fields.vector,
					      entry.fields.dest_mode,
					      entry.fields.trig_mode);
		}
	}
	return r;
}

/* This should be called with the kvm->lock mutex held
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
{
	struct kvm_kernel_irq_routing_entry *e;
	unsigned long *irq_state, sig_level;
	int ret = -1;

	if (irq < KVM_IOAPIC_NUM_PINS) {
		/* Shared pin: each irq source owns one bit of per-pin
		 * state; the line is asserted while any bit is set. */
		irq_state = (unsigned long *)&kvm->arch.irq_states[irq];

		/* Logical OR for level trig interrupt */
		if (level)
			set_bit(irq_source_id, irq_state);
		else
			clear_bit(irq_source_id, irq_state);
		sig_level = !!(*irq_state);
	} else /* Deal with MSI/MSI-X */
		sig_level = 1;

	/* Not possible to detect if the guest uses the PIC or the
	 * IOAPIC.  So set the bit in both. The guest will ignore
	 * writes to the unused one.
	 */
	list_for_each_entry(e, &kvm->irq_routing, link)
		if (e->gsi == irq) {
			int r = e->set(e, kvm, sig_level);
			if (r < 0)
				continue;

			/* Sum delivery counts across matching entries;
			 * the first success replaces the initial -1. */
			ret = r + ((ret < 0) ? 0 : ret);
		}
	return ret;
}

/*
 * Called when the guest ACKs an interrupt on (@irqchip, @pin): map the
 * pin back to its GSI via the routing table (falling back to the raw
 * pin number if no entry matches) and invoke every ack notifier
 * registered for that GSI.
 */
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_ack_notifier *kian;
	struct hlist_node *n;
	unsigned gsi = pin;

	list_for_each_entry(e, &kvm->irq_routing, link)
		if (e->irqchip.irqchip == irqchip &&
		    e->irqchip.pin == pin) {
			gsi = e->gsi;
			break;
		}

	hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

/* Register @kian to be called when its GSI is acked by the guest. */
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	struct hlist_head *notifiers = &kvm->arch.irq_ack_notifier_list;

	hlist_add_head(&kian->link, notifiers);
}

/* Detach a previously registered ack notifier.  hlist_del_init()
 * reinitializes the node, so a repeated unregister is harmless. */
void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
{
	hlist_del_init(&kian->link);
}
226 227 228 229 230 231 232

/* The caller must hold kvm->lock mutex */
int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id = find_first_zero_bit(bitmap,
				sizeof(kvm->arch.irq_sources_bitmap));
233

234 235
	if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
		printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
236 237 238 239 240 241
		return -EFAULT;
	}

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	set_bit(irq_source_id, bitmap);

242 243 244 245 246 247 248
	return irq_source_id;
}

void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	int i;

249 250 251
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);

	if (irq_source_id < 0 ||
252 253 254 255 256 257 258 259
	    irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
		return;
	}
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
}
260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283

/* Register @kimn to be called when @irq is masked or unmasked. */
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	kimn->irq = irq;
	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
}

/* Remove @kimn from the mask-notifier list.  @kvm and @irq are unused
 * but keep the signature symmetric with the register function. */
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	hlist_del(&kimn->link);
}

/* Invoke every mask notifier registered for @irq with the new @mask
 * state. */
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
{
	struct kvm_irq_mask_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &kvm->mask_notifier_list, link) {
		if (notifier->irq != irq)
			continue;
		notifier->func(notifier, mask);
	}
}

284 285 286 287 288 289 290 291 292 293 294 295 296
static void __kvm_free_irq_routing(struct list_head *irq_routing)
{
	struct kvm_kernel_irq_routing_entry *e, *n;

	list_for_each_entry_safe(e, n, irq_routing, link)
		kfree(e);
}

/* Free the VM's entire installed routing table (VM teardown path). */
void kvm_free_irq_routing(struct kvm *kvm)
{
	__kvm_free_irq_routing(&kvm->irq_routing);
}

297 298
static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			       const struct kvm_irq_routing_entry *ue)
299 300 301 302 303 304 305 306 307 308 309 310 311
{
	int r = -EINVAL;
	int delta;

	e->gsi = ue->gsi;
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		delta = 0;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_MASTER:
			e->set = kvm_set_pic_irq;
			break;
		case KVM_IRQCHIP_PIC_SLAVE:
312
			e->set = kvm_set_pic_irq;
313 314 315 316 317 318 319 320 321 322 323
			delta = 8;
			break;
		case KVM_IRQCHIP_IOAPIC:
				e->set = kvm_set_ioapic_irq;
			break;
		default:
			goto out;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin + delta;
		break;
S
Sheng Yang 已提交
324 325 326 327 328 329
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433
	default:
		goto out;
	}
	r = 0;
out:
	return r;
}


/*
 * Install a new routing table of @nr userspace entries, swapping it for
 * the old one under kvm->lock.  Returns 0 on success or a negative
 * errno; on failure the old table stays installed and every
 * partially-built entry is freed.  @flags must be 0 per entry.
 */
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *ue,
			unsigned nr,
			unsigned flags)
{
	struct list_head irq_list = LIST_HEAD_INIT(irq_list);
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct kvm_kernel_irq_routing_entry *e = NULL;
	unsigned i;
	int r;

	/* Build the complete new list first so a mid-way failure can't
	 * leave a half-installed table. */
	for (i = 0; i < nr; ++i) {
		r = -EINVAL;
		if (ue->gsi >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (ue->flags)
			goto out;
		r = -ENOMEM;
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out;
		r = setup_routing_entry(e, ue);
		if (r)
			goto out;
		++ue;
		list_add(&e->link, &irq_list);
		e = NULL;	/* ownership moved to irq_list */
	}

	/* Swap lists via tmp: afterwards the OLD entries sit on
	 * irq_list, so the shared cleanup below frees exactly the list
	 * being discarded (old on success, new on failure). */
	mutex_lock(&kvm->lock);
	list_splice(&kvm->irq_routing, &tmp);
	INIT_LIST_HEAD(&kvm->irq_routing);
	list_splice(&irq_list, &kvm->irq_routing);
	INIT_LIST_HEAD(&irq_list);
	list_splice(&tmp, &irq_list);
	mutex_unlock(&kvm->lock);

	r = 0;

out:
	/* kfree(NULL) and freeing an empty list are both no-ops. */
	kfree(e);
	__kvm_free_irq_routing(&irq_list);
	return r;
}

/* Identity-route GSI @irq to the IOAPIC pin of the same number. */
#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
/* On x86, GSIs 0-15 also reach the PIC: master pins 0-7, slave 8-15
 * (pin number modulo 8 within the selected chip). */
#  define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq)
#endif

/* Default table: GSIs 0-15 go to PIC+IOAPIC (x86) or IOAPIC only;
 * 16-23 are IOAPIC-only; IA64 extends the IOAPIC range to 47 pins. */
static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};

/* Install the default identity routing table for a freshly created VM. */
int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}