/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 */

#include <linux/kvm_host.h>

#include <asm/msidef.h>
#ifdef CONFIG_IA64
#include <asm/iosapic.h>
#endif

#include "irq.h"

#include "ioapic.h"

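/*
 * Routing callback for a PIC pin: forwards the level to the in-kernel
 * i8259 model.  Only x86 has one; other architectures report failure.
 */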
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int level)
{
#ifdef CONFIG_X86
	return kvm_pic_set_irq(pic_irqchip(kvm), e->irqchip.pin, level);
#else
	return -1;
#endif
}

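/* Routing callback for an IOAPIC pin: forward the level to the vIOAPIC. */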
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int level)
{
	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
}

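/*
 * True if the interrupt requests lowest-priority delivery.  IA64 encodes
 * the delivery mode in IOSAPIC format, x86 in local APIC format.
 */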
static inline bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
{
#ifdef CONFIG_IA64
	return irq->delivery_mode ==
		(IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
#else
	return irq->delivery_mode == APIC_DM_LOWEST;
#endif
}

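/*
 * Deliver an APIC interrupt to every matching local APIC, or to the
 * single lowest-priority match.  Returns < 0 if no destination matched,
 * 0 if the interrupt was coalesced, otherwise the number of CPUs hit.
 */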
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq)
{
	int i, r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;

	WARN_ON(!mutex_is_locked(&kvm->irq_lock));

	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
			kvm_is_dm_lowest_prio(irq))
		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");

	for (i = 0; i < KVM_MAX_VCPUS; i++) {
		vcpu = kvm->vcpus[i];

		if (!vcpu || !kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			continue;

		if (!kvm_is_dm_lowest_prio(irq)) {
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, irq);
		} else {
			if (!lowest)
				lowest = vcpu;
			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
				lowest = vcpu;
		}
	}

	if (lowest)
		r = kvm_apic_set_irq(lowest, irq);

	return r;
}

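/*
 * Routing callback for an MSI entry: decode destination, vector, delivery
 * and trigger mode from the MSI address/data and deliver via the APIC.
 */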
static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		       struct kvm *kvm, int level)
{
	struct kvm_lapic_irq irq;

	irq.dest_id = (e->msi.address_lo &
			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
	irq.vector = (e->msi.data &
			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
	irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
	irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
	irq.delivery_mode = e->msi.data & 0x700;
	irq.level = 1;
	irq.shorthand = 0;

	/* TODO Deal with RH bit of MSI message address */
	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
}

/* This should be called with the kvm->irq_lock mutex held
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
{
	struct kvm_kernel_irq_routing_entry *e;
	unsigned long *irq_state, sig_level;
	int ret = -1;

	WARN_ON(!mutex_is_locked(&kvm->irq_lock));

	if (irq < KVM_IOAPIC_NUM_PINS) {
		irq_state = (unsigned long *)&kvm->arch.irq_states[irq];

		/* Logical OR for level trig interrupt */
		if (level)
			set_bit(irq_source_id, irq_state);
		else
			clear_bit(irq_source_id, irq_state);
		sig_level = !!(*irq_state);
	} else /* Deal with MSI/MSI-X */
		sig_level = 1;

	/* Not possible to detect if the guest uses the PIC or the
	 * IOAPIC.  So set the bit in both. The guest will ignore
	 * writes to the unused one.
	 */
	list_for_each_entry(e, &kvm->irq_routing, link)
		if (e->gsi == irq) {
			int r = e->set(e, kvm, sig_level);
			if (r < 0)
				continue;

			ret = r + ((ret < 0) ? 0 : ret);
		}
	return ret;
}

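/*
 * Called when the guest acknowledges an interrupt on an irqchip pin:
 * map (irqchip, pin) back to its GSI and run every ack notifier
 * registered for that GSI.
 */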
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_ack_notifier *kian;
	struct hlist_node *n;
	unsigned gsi = pin;

	list_for_each_entry(e, &kvm->irq_routing, link)
		if (e->type == KVM_IRQ_ROUTING_IRQCHIP &&
		    e->irqchip.irqchip == irqchip &&
		    e->irqchip.pin == pin) {
			gsi = e->gsi;
			break;
		}

	hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

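/* Ack notifier registration/removal is serialized by kvm->irq_lock. */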
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init(&kian->link);
	mutex_unlock(&kvm->irq_lock);
}

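/*
 * Allocate an IRQ source ID.  Each injector gets its own bit in every
 * irq_states[] word so that shared level-triggered lines can be ORed
 * together in kvm_set_irq().  Note the bitmap search is bounded by
 * sizeof(), i.e. bytes rather than bits, which caps the number of
 * usable source IDs.
 */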
int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id;

	mutex_lock(&kvm->irq_lock);
	irq_source_id = find_first_zero_bit(bitmap,
				sizeof(kvm->arch.irq_sources_bitmap));

	if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
		printk(KERN_WARNING "kvm: exhausted allocatable IRQ sources!\n");
		irq_source_id = -EFAULT;
		goto unlock;
	}

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	set_bit(irq_source_id, bitmap);
unlock:
	mutex_unlock(&kvm->irq_lock);

	return irq_source_id;
}

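/* Release a source ID and drop any level it still asserts on each pin. */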
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	int i;

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);

	mutex_lock(&kvm->irq_lock);
	if (irq_source_id < 0 ||
	    irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
		goto unlock;
	}
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
unlock:
	mutex_unlock(&kvm->irq_lock);
}

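/*
 * Mask notifiers fire when the guest masks or unmasks a GSI in the
 * PIC/IOAPIC; the list is protected by kvm->irq_lock.
 */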
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
}

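/* Caller must hold kvm->irq_lock (see the WARN_ON below). */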
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
{
	struct kvm_irq_mask_notifier *kimn;
	struct hlist_node *n;

	WARN_ON(!mutex_is_locked(&kvm->irq_lock));

	hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
		if (kimn->irq == irq)
			kimn->func(kimn, mask);
}

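/* Free every entry on a routing list that is private or already locked. */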
static void __kvm_free_irq_routing(struct list_head *irq_routing)
{
	struct kvm_kernel_irq_routing_entry *e, *n;

	list_for_each_entry_safe(e, n, irq_routing, link)
		kfree(e);
}

void kvm_free_irq_routing(struct kvm *kvm)
{
	mutex_lock(&kvm->irq_lock);
	__kvm_free_irq_routing(&kvm->irq_routing);
	mutex_unlock(&kvm->irq_lock);
}

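/*
 * Translate one userspace routing entry into its kernel form: pick the
 * ->set() callback for the target chip and bias slave PIC pins by 8 so
 * PIC pins are numbered 0-15 across both chips.
 */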
static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			       const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;
	int delta;

	e->gsi = ue->gsi;
	e->type = ue->type;
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		delta = 0;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_MASTER:
			e->set = kvm_set_pic_irq;
			break;
		case KVM_IRQCHIP_PIC_SLAVE:
			e->set = kvm_set_pic_irq;
			delta = 8;
			break;
		case KVM_IRQCHIP_IOAPIC:
			e->set = kvm_set_ioapic_irq;
			break;
		default:
			goto out;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin + delta;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
	default:
		goto out;
	}
	r = 0;
out:
	return r;
}


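/*
 * Install a new GSI routing table built from the nr userspace entries.
 * The table is constructed outside the lock and spliced in under
 * kvm->irq_lock; on error the existing table is left untouched.
 */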
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *ue,
			unsigned nr,
			unsigned flags)
{
	struct list_head irq_list = LIST_HEAD_INIT(irq_list);
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct kvm_kernel_irq_routing_entry *e = NULL;
	unsigned i;
	int r;

	for (i = 0; i < nr; ++i) {
		r = -EINVAL;
		if (ue->gsi >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (ue->flags)
			goto out;
		r = -ENOMEM;
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out;
		r = setup_routing_entry(e, ue);
		if (r)
			goto out;
		++ue;
		list_add(&e->link, &irq_list);
		e = NULL;
	}

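	/*
	 * Swap the new table in under irq_lock: park the old entries on
	 * "tmp", install the new list, then move the old entries onto
	 * irq_list so the common exit path below frees them.
	 */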
	mutex_lock(&kvm->irq_lock);
	list_splice(&kvm->irq_routing, &tmp);
	INIT_LIST_HEAD(&kvm->irq_routing);
	list_splice(&irq_list, &kvm->irq_routing);
	INIT_LIST_HEAD(&irq_list);
	list_splice(&tmp, &irq_list);
	mutex_unlock(&kvm->irq_lock);

	r = 0;

out:
	kfree(e);
	__kvm_free_irq_routing(&irq_list);
	return r;
}

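/*
 * Default routing: on x86, GSIs 0-15 feed both the PIC and the IOAPIC
 * (the guest programs only one of them); higher GSIs go to the IOAPIC
 * alone.  IA64 exposes 48 IOSAPIC pins.
 */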
#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
#  define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq)
#endif

static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};

int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}