/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
24
#include <linux/slab.h>
25
#include <linux/export.h>
26
#include <trace/events/kvm.h>
S
Sheng Yang 已提交
27 28

#include <asm/msidef.h>
29 30 31
#ifdef CONFIG_IA64
#include <asm/iosapic.h>
#endif
S
Sheng Yang 已提交
32

33 34 35 36
#include "irq.h"

#include "ioapic.h"

37
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
38 39
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
40 41
{
#ifdef CONFIG_X86
42
	struct kvm_pic *pic = pic_irqchip(kvm);
43
	return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
44 45
#else
	return -1;
46 47 48
#endif
}

49
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
50 51
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
52
{
53
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
54 55
	return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
				line_status);
56 57
}

58
inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
59
{
60 61 62 63 64 65 66
#ifdef CONFIG_IA64
	return irq->delivery_mode ==
		(IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
#else
	return irq->delivery_mode == APIC_DM_LOWEST;
#endif
}
67

68
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
69
		struct kvm_lapic_irq *irq, unsigned long *dest_map)
70 71 72 73 74
{
	int i, r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;

	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
75
			kvm_is_dm_lowest_prio(irq)) {
76
		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
77 78 79
		irq->delivery_mode = APIC_DM_FIXED;
	}

80
	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
81
		return r;
82

83 84
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
85 86
			continue;

87 88
		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					irq->dest_id, irq->dest_mode))
89 90
			continue;

91 92 93
		if (!kvm_is_dm_lowest_prio(irq)) {
			if (r < 0)
				r = 0;
94
			r += kvm_apic_set_irq(vcpu, irq, dest_map);
95
		} else if (kvm_lapic_enabled(vcpu)) {
96 97 98 99
			if (!lowest)
				lowest = vcpu;
			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
				lowest = vcpu;
100
		}
101 102
	}

103
	if (lowest)
104
		r = kvm_apic_set_irq(lowest, irq, dest_map);
105 106

	return r;
107 108
}

M
Michael S. Tsirkin 已提交
109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125
static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
				   struct kvm_lapic_irq *irq)
{
	trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);

	irq->dest_id = (e->msi.address_lo &
			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
	irq->vector = (e->msi.data &
			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
	irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
	irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
	irq->delivery_mode = e->msi.data & 0x700;
	irq->level = 1;
	irq->shorthand = 0;
	/* TODO Deal with RH bit of MSI message address */
}

126
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
127
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
S
Sheng Yang 已提交
128
{
129
	struct kvm_lapic_irq irq;
S
Sheng Yang 已提交
130

131 132 133
	if (!level)
		return -1;

M
Michael S. Tsirkin 已提交
134
	kvm_set_msi_irq(e, &irq);
135

136
	return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
S
Sheng Yang 已提交
137 138
}

M
Michael S. Tsirkin 已提交
139 140 141 142 143 144 145 146 147

static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
			 struct kvm *kvm)
{
	struct kvm_lapic_irq irq;
	int r;

	kvm_set_msi_irq(e, &irq);

148
	if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
M
Michael S. Tsirkin 已提交
149 150 151 152 153 154 155 156 157 158 159 160 161 162
		return r;
	else
		return -EWOULDBLOCK;
}

/*
 * Deliver an IRQ in an atomic context if we can, or return a failure,
 * user can retry in a process context.
 * Return value:
 *  -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
 *  Other values - No need to retry.
 */
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
{
163
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
M
Michael S. Tsirkin 已提交
164 165
	struct kvm_kernel_irq_routing_entry *e;
	int ret = -EINVAL;
166
	int idx;
M
Michael S. Tsirkin 已提交
167 168 169 170 171 172 173 174 175 176 177

	trace_kvm_set_irq(irq, level, irq_source_id);

	/*
	 * Injection into either PIC or IOAPIC might need to scan all CPUs,
	 * which would need to be retried from thread context;  when same GSI
	 * is connected to both PIC and IOAPIC, we'd have to report a
	 * partial failure here.
	 * Since there's no easy way to do this, we only support injecting MSI
	 * which is limited to 1:1 GSI mapping.
	 */
178
	idx = srcu_read_lock(&kvm->irq_srcu);
179
	if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
180 181 182 183 184 185
		e = &entries[0];
		if (likely(e->type == KVM_IRQ_ROUTING_MSI))
			ret = kvm_set_msi_inatomic(e, kvm);
		else
			ret = -EWOULDBLOCK;
	}
186
	srcu_read_unlock(&kvm->irq_srcu, idx);
M
Michael S. Tsirkin 已提交
187 188 189
	return ret;
}

190 191 192
int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
193 194 195
	int irq_source_id;

	mutex_lock(&kvm->irq_lock);
196
	irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);
197

198
	if (irq_source_id >= BITS_PER_LONG) {
199
		printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
200 201
		irq_source_id = -EFAULT;
		goto unlock;
202 203 204
	}

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
205 206 207
#ifdef CONFIG_X86
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
#endif
208
	set_bit(irq_source_id, bitmap);
209
unlock:
210
	mutex_unlock(&kvm->irq_lock);
211

212 213 214 215 216
	return irq_source_id;
}

void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
217
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
218 219 220
#ifdef CONFIG_X86
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
#endif
221

222
	mutex_lock(&kvm->irq_lock);
223
	if (irq_source_id < 0 ||
224
	    irq_source_id >= BITS_PER_LONG) {
225
		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
226
		goto unlock;
227
	}
228 229 230 231
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
	if (!irqchip_in_kernel(kvm))
		goto unlock;

232
	kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
233
#ifdef CONFIG_X86
234
	kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
235
#endif
236
unlock:
237
	mutex_unlock(&kvm->irq_lock);
238
}
239 240 241 242

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
243
	mutex_lock(&kvm->irq_lock);
244
	kimn->irq = irq;
245
	hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
246
	mutex_unlock(&kvm->irq_lock);
247 248 249 250 251
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
252
	mutex_lock(&kvm->irq_lock);
253
	hlist_del_rcu(&kimn->link);
254
	mutex_unlock(&kvm->irq_lock);
255
	synchronize_srcu(&kvm->irq_srcu);
256 257
}

258 259
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
260 261
{
	struct kvm_irq_mask_notifier *kimn;
262
	int idx, gsi;
263

264
	idx = srcu_read_lock(&kvm->irq_srcu);
265
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
266
	if (gsi != -1)
267
		hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
268 269
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
270
	srcu_read_unlock(&kvm->irq_srcu, idx);
271 272
}

273
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
274
			  const struct kvm_irq_routing_entry *ue)
275 276 277
{
	int r = -EINVAL;
	int delta;
278
	unsigned max_pin;
279

280 281 282 283 284 285
	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		delta = 0;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_MASTER:
			e->set = kvm_set_pic_irq;
286
			max_pin = PIC_NUM_PINS;
287 288
			break;
		case KVM_IRQCHIP_PIC_SLAVE:
289
			e->set = kvm_set_pic_irq;
290
			max_pin = PIC_NUM_PINS;
291 292 293
			delta = 8;
			break;
		case KVM_IRQCHIP_IOAPIC:
294
			max_pin = KVM_IOAPIC_NUM_PINS;
295
			e->set = kvm_set_ioapic_irq;
296 297 298 299 300 301
			break;
		default:
			goto out;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin + delta;
302
		if (e->irqchip.pin >= max_pin)
303
			goto out;
304
		break;
S
Sheng Yang 已提交
305 306 307 308 309 310
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
311 312 313
	default:
		goto out;
	}
314

315 316 317 318 319 320 321
	r = 0;
out:
	return r;
}

#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
322
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
323 324 325 326 327
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
#  define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
328
	  .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq)
#endif

static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};

int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}