/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <trace/events/kvm.h>

#include <asm/msidef.h>
#ifdef CONFIG_IA64
#include <asm/iosapic.h>
#endif

#include "irq.h"

#include "ioapic.h"

/*
 * Record one source's level on a shared level-triggered line and return
 * the resulting line level: the line stays asserted while at least one
 * source still drives it high (logical OR across all source bits).
 */
static inline int kvm_irq_line_state(unsigned long *irq_state,
				     int irq_source_id, int level)
{
	if (level)
		set_bit(irq_source_id, irq_state);
	else
		clear_bit(irq_source_id, irq_state);

	return *irq_state != 0;
}

/*
 * Routing-entry callback: fold this source's level into the shared PIC
 * line state and forward the resulting level to the in-kernel PIC.
 * The PIC only exists on x86; elsewhere the interrupt is ignored (-1).
 */
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level)
{
#ifdef CONFIG_X86
	struct kvm_pic *pic = pic_irqchip(kvm);
	int pin = e->irqchip.pin;
	int line_level = kvm_irq_line_state(&pic->irq_states[pin],
					    irq_source_id, level);

	return kvm_pic_set_irq(pic, pin, line_level);
#else
	return -1;
#endif
}

static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
62
			      struct kvm *kvm, int irq_source_id, int level)
63
{
64 65 66 67 68
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	level = kvm_irq_line_state(&ioapic->irq_states[e->irqchip.pin],
				   irq_source_id, level);

	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, level);
69 70
}

71
inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
72
{
73 74 75 76 77 78 79
#ifdef CONFIG_IA64
	return irq->delivery_mode ==
		(IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
#else
	return irq->delivery_mode == APIC_DM_LOWEST;
#endif
}
80

81 82 83 84 85 86 87 88
/*
 * Deliver an APIC interrupt @irq, optionally originating from @src, to
 * every matching destination vcpu.
 *
 * Returns < 0 if no vcpu accepted the interrupt, otherwise the number
 * of vcpus it was actually delivered to (0 means all were coalesced).
 */
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq)
{
	int i, r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;

	/* Physical-mode broadcast combined with lowest-priority delivery
	 * is a dubious request from the guest; log it and carry on. */
	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
			kvm_is_dm_lowest_prio(irq))
		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			continue;

		if (!kvm_is_dm_lowest_prio(irq)) {
			/* Fixed delivery: inject into every matching vcpu
			 * and accumulate how many accepted. */
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, irq);
		} else if (kvm_lapic_enabled(vcpu)) {
			/* Lowest priority: just remember the enabled vcpu
			 * with the lowest arbitration priority seen so far;
			 * injection happens once after the scan. */
			if (!lowest)
				lowest = vcpu;
			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
				lowest = vcpu;
		}
	}

	if (lowest)
		r = kvm_apic_set_irq(lowest, irq);

	return r;
}

static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
118
		       struct kvm *kvm, int irq_source_id, int level)
S
Sheng Yang 已提交
119
{
120
	struct kvm_lapic_irq irq;
S
Sheng Yang 已提交
121

122 123 124
	if (!level)
		return -1;

125 126
	trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);

127
	irq.dest_id = (e->msi.address_lo &
128
			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
129
	irq.vector = (e->msi.data &
130
			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
131 132 133 134 135
	irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
	irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
	irq.delivery_mode = e->msi.data & 0x700;
	irq.level = 1;
	irq.shorthand = 0;
136 137

	/* TODO Deal with RH bit of MSI message address */
138
	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
S
Sheng Yang 已提交
139 140
}

141
/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
{
	struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
	int ret = -1, i = 0;
	struct kvm_irq_routing_table *irq_rt;
	struct hlist_node *n;

	trace_kvm_set_irq(irq, level, irq_source_id);

	/* Not possible to detect if the guest uses the PIC or the
	 * IOAPIC.  So set the bit in both. The guest will ignore
	 * writes to the unused one.
	 */
	/* Copy the routing entries for this GSI out under the RCU read
	 * lock, then drop it before invoking the set callbacks. */
	rcu_read_lock();
	irq_rt = rcu_dereference(kvm->irq_routing);
	if (irq < irq_rt->nr_rt_entries)
		hlist_for_each_entry(e, n, &irq_rt->map[irq], link)
			irq_set[i++] = *e;
	rcu_read_unlock();

	while(i--) {
		int r;
		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level);
		if (r < 0)
			continue;

		/* Sum delivery counts; any success replaces the initial
		 * "ignored" (-1) result with a non-negative total. */
		ret = r + ((ret < 0) ? 0 : ret);
	}

	return ret;
}

/*
 * Invoke every registered ack notifier whose GSI is routed to the given
 * (irqchip, pin), when the guest acknowledges that interrupt.
 */
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	struct hlist_node *n;
	int gsi;

	trace_kvm_ack_irq(irqchip, pin);

	/* Both the routing table and the notifier list are traversed
	 * under RCU; -1 in chip[][] means no GSI routes to this pin. */
	rcu_read_lock();
	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi)
				kian->irq_acked(kian);
	rcu_read_unlock();
}

/*
 * Register @kian to be called back (via kvm_notify_acked_irq) when its
 * GSI is acknowledged.  Writers serialize on irq_lock; readers traverse
 * the list under RCU.
 */
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

/*
 * Unregister an ack notifier.  The synchronize_rcu() guarantees no RCU
 * reader still references @kian when this returns, so the caller may
 * free it afterwards.
 */
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_rcu();
}

/*
 * Allocate a free interrupt source id from the per-VM bitmap.
 * Returns the id, or -EFAULT when all BITS_PER_LONG ids are taken.
 */
int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id;

	mutex_lock(&kvm->irq_lock);
	irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);

	if (irq_source_id >= BITS_PER_LONG) {
		printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
		/* NOTE(review): -EFAULT is an odd errno for resource
		 * exhaustion; callers appear to only test for < 0. */
		irq_source_id = -EFAULT;
		goto unlock;
	}

	/* The userspace source id is expected to be pre-set in the
	 * bitmap, so it must never be handed out here. */
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	set_bit(irq_source_id, bitmap);
unlock:
	mutex_unlock(&kvm->irq_lock);

	return irq_source_id;
}


/*
 * Return @irq_source_id to the pool and clear any level state this
 * source still asserts on the in-kernel irqchips.
 */
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	int i;

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);

	mutex_lock(&kvm->irq_lock);
	if (irq_source_id < 0 ||
	    irq_source_id >= BITS_PER_LONG) {
		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
		goto unlock;
	}
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
	if (!irqchip_in_kernel(kvm))
		goto unlock;

	/* Drop this source's bit from every ioapic pin state and, on
	 * x86, from the PIC state for the ISA range (pins 0-15). */
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++) {
		clear_bit(irq_source_id, &kvm->arch.vioapic->irq_states[i]);
		if (i >= 16)
			continue;
#ifdef CONFIG_X86
		clear_bit(irq_source_id, &pic_irqchip(kvm)->irq_states[i]);
#endif
	}
unlock:
	mutex_unlock(&kvm->irq_lock);
}

/*
 * Register @kimn to be called (via kvm_fire_mask_notifiers) when the
 * mask state of GSI @irq changes.  kimn->irq is set before the RCU
 * publish so readers never see an uninitialized notifier.
 */
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}


/*
 * Unregister a mask notifier.  After synchronize_rcu() returns, no RCU
 * reader can still reference @kimn, so the caller may free it.
 */
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_rcu();
}

282 283
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
284 285 286
{
	struct kvm_irq_mask_notifier *kimn;
	struct hlist_node *n;
287
	int gsi;
288

289
	rcu_read_lock();
290 291 292 293 294
	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
	if (gsi != -1)
		hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
295
	rcu_read_unlock();
296 297
}

298 299
void kvm_free_irq_routing(struct kvm *kvm)
{
300 301
	/* Called only during vm destruction. Nobody can use the pointer
	   at this stage */
302
	kfree(kvm->irq_routing);
303 304
}

305 306
static int setup_routing_entry(struct kvm_irq_routing_table *rt,
			       struct kvm_kernel_irq_routing_entry *e,
307
			       const struct kvm_irq_routing_entry *ue)
308 309 310
{
	int r = -EINVAL;
	int delta;
311
	unsigned max_pin;
312 313 314 315 316 317 318 319 320 321 322
	struct kvm_kernel_irq_routing_entry *ei;
	struct hlist_node *n;

	/*
	 * Do not allow GSI to be mapped to the same irqchip more than once.
	 * Allow only one to one mapping between GSI and MSI.
	 */
	hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
		if (ei->type == KVM_IRQ_ROUTING_MSI ||
		    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
			return r;
323 324

	e->gsi = ue->gsi;
325
	e->type = ue->type;
326 327 328 329 330 331
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		delta = 0;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_MASTER:
			e->set = kvm_set_pic_irq;
332
			max_pin = 16;
333 334
			break;
		case KVM_IRQCHIP_PIC_SLAVE:
335
			e->set = kvm_set_pic_irq;
336
			max_pin = 16;
337 338 339
			delta = 8;
			break;
		case KVM_IRQCHIP_IOAPIC:
340
			max_pin = KVM_IOAPIC_NUM_PINS;
341
			e->set = kvm_set_ioapic_irq;
342 343 344 345 346 347
			break;
		default:
			goto out;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin + delta;
348
		if (e->irqchip.pin >= max_pin)
349 350
			goto out;
		rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
351
		break;
S
Sheng Yang 已提交
352 353 354 355 356 357
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
358 359 360
	default:
		goto out;
	}
361 362

	hlist_add_head(&e->link, &rt->map[e->gsi]);
363 364 365 366 367 368 369 370 371 372 373
	r = 0;
out:
	return r;
}


int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *ue,
			unsigned nr,
			unsigned flags)
{
	struct kvm_irq_routing_table *new, *old;
	u32 i, j, nr_rt_entries = 0;
	int r;

	/* The map[] hash is indexed by GSI, so it must span the largest
	 * GSI present (+1 converts the max index into a count). */
	for (i = 0; i < nr; ++i) {
		if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
			return -EINVAL;
		nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
	}

	nr_rt_entries += 1;

	/* Single allocation: table header, then nr_rt_entries hlist
	 * heads, then the nr kernel routing entries. */
	new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head))
		      + (nr * sizeof(struct kvm_kernel_irq_routing_entry)),
		      GFP_KERNEL);

	if (!new)
		return -ENOMEM;

	new->rt_entries = (void *)&new->map[nr_rt_entries];

	new->nr_rt_entries = nr_rt_entries;
	/* -1 marks irqchip pins without a routed GSI; 3 chips are the
	 * PIC master, PIC slave and IOAPIC. */
	for (i = 0; i < 3; i++)
		for (j = 0; j < KVM_IOAPIC_NUM_PINS; j++)
			new->chip[i][j] = -1;

	for (i = 0; i < nr; ++i) {
		r = -EINVAL;
		if (ue->flags)
			goto out;
		r = setup_routing_entry(new, &new->rt_entries[i], ue);
		if (r)
			goto out;
		++ue;
	}

	/* Publish the new table, then wait a grace period before freeing
	 * the old one so in-flight RCU readers stay safe. */
	mutex_lock(&kvm->irq_lock);
	old = kvm->irq_routing;
	rcu_assign_pointer(kvm->irq_routing, new);
	mutex_unlock(&kvm->irq_lock);
	synchronize_rcu();

	/* On success free the old table; on failure "new" itself. */
	new = old;
	r = 0;

out:
	kfree(new);
	return r;
}

/* Route GSI @irq to the ioapic pin of the same number. */
#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
/* On x86, ISA IRQs additionally route to the PIC (pin = irq % 8 on the
 * master/slave chip chosen by SELECT_PIC). */
#  define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq)
#endif

/* Identity GSI->pin default routing: IRQs 0-15 to PIC+IOAPIC (x86) or
 * IOAPIC only, 16-23 to the IOAPIC, and additionally 24-47 under
 * CONFIG_IA64. */
static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};

/*
 * Install the built-in identity routing table, used until userspace
 * replaces it with its own via kvm_set_irq_routing().
 */
int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}