/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 */

#include <linux/kvm_host.h>

#include <asm/msidef.h>

#include "irq.h"

#include "ioapic.h"

/*
 * Routing callback: deliver a routed IRQ to the emulated i8259 PIC.
 * Only x86 has an in-kernel PIC; other architectures report failure.
 * (Scrape artifacts removed from the body.)
 */
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int level)
{
#ifdef CONFIG_X86
	return kvm_pic_set_irq(pic_irqchip(kvm), e->irqchip.pin, level);
#else
	return -1;	/* no PIC on this architecture */
#endif
}

static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int level)
42
{
43
	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
44 45
}

void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask)
{
	struct kvm_vcpu *vcpu;

52 53 54
	kvm_ioapic_get_delivery_bitmask(ioapic, entry->fields.dest_id,
					entry->fields.dest_mode,
					deliver_bitmask);
55 56 57
	switch (entry->fields.delivery_mode) {
	case IOAPIC_LOWEST_PRIORITY:
		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
58
				entry->fields.vector, deliver_bitmask);
S
Sheng Yang 已提交
59
		__set_bit(vcpu->vcpu_id, deliver_bitmask);
60 61 62 63 64 65 66 67 68 69 70 71
		break;
	case IOAPIC_FIXED:
	case IOAPIC_NMI:
		break;
	default:
		if (printk_ratelimit())
			printk(KERN_INFO "kvm: unsupported delivery mode %d\n",
				entry->fields.delivery_mode);
		*deliver_bitmask = 0;
	}
}

static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		       struct kvm *kvm, int level)
S
Sheng Yang 已提交
74
{
75
	int vcpu_id, r = -1;
S
Sheng Yang 已提交
76 77
	struct kvm_vcpu *vcpu;
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
78
	union kvm_ioapic_redirect_entry entry;
S
Sheng Yang 已提交
79
	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
S
Sheng Yang 已提交
80 81 82

	BUG_ON(!ioapic);

S
Sheng Yang 已提交
83 84
	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);

85 86 87 88 89 90 91 92 93 94 95 96 97 98 99
	entry.bits = 0;
	entry.fields.dest_id = (e->msi.address_lo &
			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
	entry.fields.vector = (e->msi.data &
			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
	entry.fields.dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
			(unsigned long *)&e->msi.address_lo);
	entry.fields.trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
			(unsigned long *)&e->msi.data);
	entry.fields.delivery_mode = test_bit(
			MSI_DATA_DELIVERY_MODE_SHIFT,
			(unsigned long *)&e->msi.data);

	/* TODO Deal with RH bit of MSI message address */

S
Sheng Yang 已提交
100
	kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
101

S
Sheng Yang 已提交
102
	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
103 104 105
		printk(KERN_WARNING "kvm: no destination for MSI delivery!");
		return -1;
	}
S
Sheng Yang 已提交
106 107 108
	while ((vcpu_id = find_first_bit(deliver_bitmask,
					KVM_MAX_VCPUS)) < KVM_MAX_VCPUS) {
		__clear_bit(vcpu_id, deliver_bitmask);
109 110 111 112 113 114
		vcpu = ioapic->kvm->vcpus[vcpu_id];
		if (vcpu) {
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, entry.fields.vector,
					      entry.fields.trig_mode);
S
Sheng Yang 已提交
115 116
		}
	}
117
	return r;
S
Sheng Yang 已提交
118 119
}

/* This should be called with the kvm->lock mutex held
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
127
{
128
	struct kvm_kernel_irq_routing_entry *e;
S
Sheng Yang 已提交
129
	unsigned long *irq_state, sig_level;
130
	int ret = -1;
S
Sheng Yang 已提交
131 132 133

	if (irq < KVM_IOAPIC_NUM_PINS) {
		irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
134

S
Sheng Yang 已提交
135 136 137 138 139 140 141 142
		/* Logical OR for level trig interrupt */
		if (level)
			set_bit(irq_source_id, irq_state);
		else
			clear_bit(irq_source_id, irq_state);
		sig_level = !!(*irq_state);
	} else /* Deal with MSI/MSI-X */
		sig_level = 1;
143

144 145 146 147
	/* Not possible to detect if the guest uses the PIC or the
	 * IOAPIC.  So set the bit in both. The guest will ignore
	 * writes to the unused one.
	 */
148
	list_for_each_entry(e, &kvm->irq_routing, link)
149 150 151 152 153 154 155 156
		if (e->gsi == irq) {
			int r = e->set(e, kvm, sig_level);
			if (r < 0)
				continue;

			ret = r + ((ret < 0) ? 0 : ret);
		}
	return ret;
157 158
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
160
{
161
	struct kvm_kernel_irq_routing_entry *e;
162 163
	struct kvm_irq_ack_notifier *kian;
	struct hlist_node *n;
164 165 166 167 168 169 170 171
	unsigned gsi = pin;

	list_for_each_entry(e, &kvm->irq_routing, link)
		if (e->irqchip.irqchip == irqchip &&
		    e->irqchip.pin == pin) {
			gsi = e->gsi;
			break;
		}
172 173 174 175 176 177 178 179 180 181 182 183

	hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

/*
 * Register an ack notifier; kvm_notify_acked_irq() fires it whenever
 * the guest EOIs the notifier's GSI.
 */
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
}

void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
185
{
186
	hlist_del_init(&kian->link);
187
}

/* The caller must hold kvm->lock mutex */
int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
	int irq_source_id = find_first_zero_bit(bitmap,
				sizeof(kvm->arch.irq_sources_bitmap));
195

196 197
	if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
		printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
198 199 200 201 202 203
		return -EFAULT;
	}

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
	set_bit(irq_source_id, bitmap);

204 205 206 207 208 209 210
	return irq_source_id;
}

void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	int i;

211 212 213
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);

	if (irq_source_id < 0 ||
214 215 216 217 218 219 220 221
	    irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
		return;
	}
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
}

/*
 * Register @kimn to be called (via kvm_fire_mask_notifiers) when the
 * guest changes the mask state of @irq.
 * NOTE(review): no locking visible here — presumably the caller
 * serializes against kvm_fire_mask_notifiers; confirm.
 */
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	kimn->irq = irq;
	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
}

/* Remove a previously registered mask notifier (@irq is unused here). */
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	hlist_del(&kimn->link);
}

/* Invoke every notifier registered for @irq with the new @mask state. */
void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
{
	struct kvm_irq_mask_notifier *kimn;
	struct hlist_node *n;

	hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
		if (kimn->irq == irq)
			kimn->func(kimn, mask);
}

/*
 * kfree() every entry on a routing list; the _safe iterator is needed
 * because each node is freed while walking.
 */
static void __kvm_free_irq_routing(struct list_head *irq_routing)
{
	struct kvm_kernel_irq_routing_entry *e, *n;

	list_for_each_entry_safe(e, n, irq_routing, link)
		kfree(e);
}

/* Release the VM's entire GSI routing table. */
void kvm_free_irq_routing(struct kvm *kvm)
{
	__kvm_free_irq_routing(&kvm->irq_routing);
}

static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			       const struct kvm_irq_routing_entry *ue)
261 262 263 264 265 266 267 268 269 270 271 272 273
{
	int r = -EINVAL;
	int delta;

	e->gsi = ue->gsi;
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		delta = 0;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_MASTER:
			e->set = kvm_set_pic_irq;
			break;
		case KVM_IRQCHIP_PIC_SLAVE:
274
			e->set = kvm_set_pic_irq;
275 276 277 278 279 280 281 282 283 284 285
			delta = 8;
			break;
		case KVM_IRQCHIP_IOAPIC:
				e->set = kvm_set_ioapic_irq;
			break;
		default:
			goto out;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin + delta;
		break;
S
Sheng Yang 已提交
286 287 288 289 290 291
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395
	default:
		goto out;
	}
	r = 0;
out:
	return r;
}


/*
 * Replace the VM's GSI routing table with the @nr entries at @ue.
 * The new list is fully built before taking kvm->lock, so allocation
 * or validation failure leaves the existing table untouched.
 * Returns 0 on success or a negative errno.
 */
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *ue,
			unsigned nr,
			unsigned flags)
{
	struct list_head irq_list = LIST_HEAD_INIT(irq_list);
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct kvm_kernel_irq_routing_entry *e = NULL;
	unsigned i;
	int r;

	for (i = 0; i < nr; ++i) {
		r = -EINVAL;
		if (ue->gsi >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (ue->flags)
			goto out;
		r = -ENOMEM;
		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out;
		r = setup_routing_entry(e, ue);
		if (r)
			goto out;
		++ue;
		list_add(&e->link, &irq_list);
		e = NULL;	/* ownership moved onto irq_list */
	}

	/* Swap old table for new under the lock; keep the old on irq_list. */
	mutex_lock(&kvm->lock);
	list_splice(&kvm->irq_routing, &tmp);
	INIT_LIST_HEAD(&kvm->irq_routing);
	list_splice(&irq_list, &kvm->irq_routing);
	INIT_LIST_HEAD(&irq_list);
	list_splice(&tmp, &irq_list);
	mutex_unlock(&kvm->lock);

	r = 0;

out:
	/* Frees the half-built entry (error) and whatever irq_list holds:
	 * the partial new list on error, or the displaced old table. */
	kfree(e);
	__kvm_free_irq_routing(&irq_list);
	return r;
}

#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
#  define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq)
#endif

/*
 * Identity GSI->pin routing: GSIs 0-15 are routed to both PIC and
 * IOAPIC (the guest ignores the unused chip), 16-23 to the IOAPIC
 * only; ia64 exposes pins up to 47.
 */
static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};

/* Install the identity default_routing table for a newly created VM. */
int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}