/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <trace/events/kvm.h>

#include <asm/msidef.h>
#ifdef CONFIG_IA64
#include <asm/iosapic.h>
#endif

#include "irq.h"

#include "ioapic.h"

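/*
 * Route a GSI to the in-kernel PIC.  Only meaningful on x86, where an
 * in-kernel PIC exists; elsewhere the interrupt is reported as ignored.
 */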
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level)
{
#ifdef CONFIG_X86
	struct kvm_pic *pic = pic_irqchip(kvm);
	return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
#else
	return -1;
#endif
}

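/*
 * Route a GSI to the in-kernel IOAPIC model.
 */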
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level);
}

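/*
 * True if the interrupt uses lowest-priority delivery.  The delivery mode
 * encoding differs between x86 (APIC_DM_LOWEST) and ia64
 * (IOSAPIC_LOWEST_PRIORITY).
 */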
static inline bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
{
#ifdef CONFIG_IA64
	return irq->delivery_mode ==
		(IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
#else
	return irq->delivery_mode == APIC_DM_LOWEST;
#endif
}

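/*
 * Deliver a local APIC interrupt to every vcpu whose APIC matches the
 * destination.  The fast path is tried first; otherwise all vcpus are
 * scanned, and for lowest-priority delivery a single target is chosen by
 * comparing APIC priorities.
 */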
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq)
{
	int i, r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;

	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
			kvm_is_dm_lowest_prio(irq)) {
		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
		irq->delivery_mode = APIC_DM_FIXED;
	}

	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r))
		return r;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			continue;

		if (!kvm_is_dm_lowest_prio(irq)) {
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, irq);
		} else if (kvm_lapic_enabled(vcpu)) {
			if (!lowest)
				lowest = vcpu;
			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
				lowest = vcpu;
		}
	}

	if (lowest)
		r = kvm_apic_set_irq(lowest, irq);

	return r;
}

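/*
 * Decode an MSI routing entry (x86 MSI address/data format) into a
 * struct kvm_lapic_irq and deliver it to the local APICs.
 */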
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level)
{
	struct kvm_lapic_irq irq;

	if (!level)
		return -1;

	trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);

	irq.dest_id = (e->msi.address_lo &
			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
	irq.vector = (e->msi.data &
			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
	irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
	irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
	irq.delivery_mode = e->msi.data & 0x700;
	irq.level = 1;
	irq.shorthand = 0;

	/* TODO Deal with RH bit of MSI message address */
	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
}

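/*
 * Deliver an MSI described directly by userspace, without going through
 * the routing table.  Requires an in-kernel irqchip and a zero flags field.
 */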
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct kvm_kernel_irq_routing_entry route;

	if (!irqchip_in_kernel(kvm) || msi->flags != 0)
		return -EINVAL;

	route.msi.address_lo = msi->address_lo;
	route.msi.address_hi = msi->address_hi;
	route.msi.data = msi->data;

	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
}

/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
{
	struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
	int ret = -1, i = 0;
	struct kvm_irq_routing_table *irq_rt;
	struct hlist_node *n;

	trace_kvm_set_irq(irq, level, irq_source_id);

	/* Not possible to detect if the guest uses the PIC or the
	 * IOAPIC.  So set the bit in both. The guest will ignore
	 * writes to the unused one.
	 */
	rcu_read_lock();
	irq_rt = rcu_dereference(kvm->irq_routing);
	if (irq < irq_rt->nr_rt_entries)
		hlist_for_each_entry(e, n, &irq_rt->map[irq], link)
			irq_set[i++] = *e;
	rcu_read_unlock();

	while (i--) {
		int r;
		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
		if (r < 0)
			continue;

		ret = r + ((ret < 0) ? 0 : ret);
	}

	return ret;
}

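/*
 * Run the ack notifiers registered for the GSI that is routed to the given
 * (irqchip, pin); called from the irqchips when the guest acknowledges an
 * interrupt.
 */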
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	struct hlist_node *n;
	int gsi;

	trace_kvm_ack_irq(irqchip, pin);

	rcu_read_lock();
	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi)
				kian->irq_acked(kian);
	rcu_read_unlock();
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_rcu();
}

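/*
 * Allocate an IRQ source ID from the per-VM bitmap.  Separate source IDs
 * let independent injectors drive the same level-triggered pin without
 * clobbering each other's asserted state.
 */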
int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id;

	mutex_lock(&kvm->irq_lock);
	irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);

	if (irq_source_id >= BITS_PER_LONG) {
		printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
		irq_source_id = -EFAULT;
		goto unlock;
	}

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
#ifdef CONFIG_X86
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
#endif
	set_bit(irq_source_id, bitmap);
unlock:
	mutex_unlock(&kvm->irq_lock);

	return irq_source_id;
}

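/*
 * Release an IRQ source ID and clear any interrupts it still has asserted
 * in the in-kernel irqchips.
 */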
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
#ifdef CONFIG_X86
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
#endif

	mutex_lock(&kvm->irq_lock);
	if (irq_source_id < 0 ||
	    irq_source_id >= BITS_PER_LONG) {
		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
		goto unlock;
	}
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
	if (!irqchip_in_kernel(kvm))
		goto unlock;

	kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
#ifdef CONFIG_X86
	kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
#endif
unlock:
	mutex_unlock(&kvm->irq_lock);
}

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_rcu();
}

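/*
 * Invoke the mask notifiers registered for the GSI routed to the given
 * (irqchip, pin), e.g. when the pin is masked or unmasked.
 */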
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
{
	struct kvm_irq_mask_notifier *kimn;
	struct hlist_node *n;
	int gsi;

	rcu_read_lock();
	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
	if (gsi != -1)
		hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
	rcu_read_unlock();
}

void kvm_free_irq_routing(struct kvm *kvm)
{
	/* Called only during vm destruction. Nobody can use the pointer
	   at this stage */
	kfree(kvm->irq_routing);
}

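/*
 * Translate a userspace routing entry into its kernel representation and
 * hook it into the per-GSI hash list of the new routing table.
 */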
static int setup_routing_entry(struct kvm_irq_routing_table *rt,
			       struct kvm_kernel_irq_routing_entry *e,
			       const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;
	int delta;
	unsigned max_pin;
	struct kvm_kernel_irq_routing_entry *ei;
	struct hlist_node *n;

	/*
	 * Do not allow GSI to be mapped to the same irqchip more than once.
	 * Allow only one to one mapping between GSI and MSI.
	 */
	hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
		if (ei->type == KVM_IRQ_ROUTING_MSI ||
		    ue->type == KVM_IRQ_ROUTING_MSI ||
		    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
			return r;

	e->gsi = ue->gsi;
	e->type = ue->type;
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		delta = 0;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_MASTER:
			e->set = kvm_set_pic_irq;
			max_pin = PIC_NUM_PINS;
			break;
		case KVM_IRQCHIP_PIC_SLAVE:
			e->set = kvm_set_pic_irq;
			max_pin = PIC_NUM_PINS;
			delta = 8;
			break;
		case KVM_IRQCHIP_IOAPIC:
			max_pin = KVM_IOAPIC_NUM_PINS;
			e->set = kvm_set_ioapic_irq;
			break;
		default:
			goto out;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin + delta;
		if (e->irqchip.pin >= max_pin)
			goto out;
		rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
	default:
		goto out;
	}

	hlist_add_head(&e->link, &rt->map[e->gsi]);
	r = 0;
out:
	return r;
}


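/*
 * Replace the VM's irq routing table.  A new table is built from the
 * userspace entries, published under kvm->irq_lock, and the old table is
 * freed once an RCU grace period has elapsed.
 */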
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *ue,
			unsigned nr,
			unsigned flags)
{
	struct kvm_irq_routing_table *new, *old;
	u32 i, j, nr_rt_entries = 0;
	int r;

	for (i = 0; i < nr; ++i) {
		if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
			return -EINVAL;
		nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
	}

	nr_rt_entries += 1;

	new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head))
		      + (nr * sizeof(struct kvm_kernel_irq_routing_entry)),
		      GFP_KERNEL);

	if (!new)
		return -ENOMEM;

	new->rt_entries = (void *)&new->map[nr_rt_entries];

	new->nr_rt_entries = nr_rt_entries;
	for (i = 0; i < 3; i++)
		for (j = 0; j < KVM_IOAPIC_NUM_PINS; j++)
			new->chip[i][j] = -1;

	for (i = 0; i < nr; ++i) {
		r = -EINVAL;
		if (ue->flags)
			goto out;
		r = setup_routing_entry(new, &new->rt_entries[i], ue);
		if (r)
			goto out;
		++ue;
	}

	mutex_lock(&kvm->irq_lock);
	old = kvm->irq_routing;
	kvm_irq_routing_update(kvm, new);
	mutex_unlock(&kvm->irq_lock);

	synchronize_rcu();

	new = old;
	r = 0;

out:
	kfree(new);
	return r;
}

#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
#  define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq)
#endif

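/*
 * Default GSI routing: on x86, GSIs 0-15 are routed to both the PIC and
 * the IOAPIC; higher GSIs (and all GSIs on other architectures) go to the
 * IOAPIC only.
 */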
static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};

int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}