/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <trace/events/kvm.h>

#include <asm/msidef.h>
#ifdef CONFIG_IA64
#include <asm/iosapic.h>
#endif

#include "irq.h"

#include "ioapic.h"

/* Route an irqchip routing entry to the in-kernel PIC (x86 only). */
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
#ifdef CONFIG_X86
	return kvm_pic_set_irq(pic_irqchip(kvm), e->irqchip.pin,
			       irq_source_id, level);
#else
	/* No in-kernel PIC on this architecture. */
	return -1;
#endif
}

/* Route an irqchip routing entry to the in-kernel IOAPIC. */
static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id, int level,
			      bool line_status)
{
	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin,
				  irq_source_id, level, line_status);
}

inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
59
{
60 61 62 63 64 65 66
#ifdef CONFIG_IA64
	return irq->delivery_mode ==
		(IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
#else
	return irq->delivery_mode == APIC_DM_LOWEST;
#endif
}
67

68
/*
 * Deliver @irq to the local APIC(s) it addresses, optionally recording the
 * targeted vcpus in @dest_map.
 *
 * @src may be NULL when the interrupt does not originate from a local APIC
 * (e.g. an MSI).  Returns the accumulated result of kvm_apic_set_irq() over
 * all matched vcpus, or -1 when no matching APIC was found.
 */
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, unsigned long *dest_map)
{
	int i, r = -1;
	struct kvm_vcpu *vcpu, *lowest = NULL;

	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
			kvm_is_dm_lowest_prio(irq)) {
		/* Physical-mode broadcast combined with lowest priority is
		 * invalid; degrade it to fixed delivery. */
		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
		irq->delivery_mode = APIC_DM_FIXED;
	}

	/* Fast path: resolve destinations via the APIC map when possible. */
	if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
		return r;

	/* Slow path: scan every vcpu for a destination match. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;

		if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			continue;

		if (!kvm_is_dm_lowest_prio(irq)) {
			if (r < 0)
				r = 0;
			r += kvm_apic_set_irq(vcpu, irq, dest_map);
		} else if (kvm_lapic_enabled(vcpu)) {
			/* Lowest priority: remember the best candidate and
			 * deliver only once the full scan is done. */
			if (!lowest)
				lowest = vcpu;
			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
				lowest = vcpu;
		}
	}

	if (lowest)
		r = kvm_apic_set_irq(lowest, irq, dest_map);

	return r;
}

static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
				   struct kvm_lapic_irq *irq)
{
	trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);

	irq->dest_id = (e->msi.address_lo &
			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
	irq->vector = (e->msi.data &
			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
	irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
	irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
	irq->delivery_mode = e->msi.data & 0x700;
	irq->level = 1;
	irq->shorthand = 0;
	/* TODO Deal with RH bit of MSI message address */
}

126
/*
 * Inject the MSI described by routing entry @e.  MSIs are edge triggered,
 * so a low level is ignored.
 */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
	struct kvm_lapic_irq lapic_irq;

	if (level == 0)
		return -1;

	kvm_set_msi_irq(e, &lapic_irq);
	return kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
}

static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
			 struct kvm *kvm)
{
	struct kvm_lapic_irq irq;
	int r;

	kvm_set_msi_irq(e, &irq);

148
	if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
M
Michael S. Tsirkin 已提交
149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165
		return r;
	else
		return -EWOULDBLOCK;
}

/*
 * Deliver an IRQ in an atomic context if we can, or return a failure,
 * user can retry in a process context.
 * Return value:
 *  -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
 *  Other values - No need to retry.
 */
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
{
	struct kvm_kernel_irq_routing_entry *e;
	int ret = -EINVAL;
	struct kvm_irq_routing_table *irq_rt;
	int idx;

	trace_kvm_set_irq(irq, level, irq_source_id);

	/*
	 * Injection into either PIC or IOAPIC might need to scan all CPUs,
	 * which would need to be retried from thread context;  when same GSI
	 * is connected to both PIC and IOAPIC, we'd have to report a
	 * partial failure here.
	 * Since there's no easy way to do this, we only support injecting MSI
	 * which is limited to 1:1 GSI mapping.
	 */
	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	if (irq < irq_rt->nr_rt_entries)
		hlist_for_each_entry(e, &irq_rt->map[irq], link) {
			if (likely(e->type == KVM_IRQ_ROUTING_MSI))
				ret = kvm_set_msi_inatomic(e, kvm);
			else
				ret = -EWOULDBLOCK;
			/* Only the first entry matters: MSI is 1:1 per GSI. */
			break;
		}
	srcu_read_unlock(&kvm->irq_srcu, idx);
	return ret;
}

int kvm_request_irq_source_id(struct kvm *kvm)
{
	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
195 196 197
	int irq_source_id;

	mutex_lock(&kvm->irq_lock);
198
	irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);
199

200
	if (irq_source_id >= BITS_PER_LONG) {
201
		printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
202 203
		irq_source_id = -EFAULT;
		goto unlock;
204 205 206
	}

	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
207 208 209
#ifdef CONFIG_X86
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
#endif
210
	set_bit(irq_source_id, bitmap);
211
unlock:
212
	mutex_unlock(&kvm->irq_lock);
213

214 215 216 217 218
	return irq_source_id;
}

/*
 * Release an IRQ source id previously returned by
 * kvm_request_irq_source_id() and drop any interrupt state that source
 * still asserts on the in-kernel irqchips.
 */
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
#ifdef CONFIG_X86
	ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
#endif

	mutex_lock(&kvm->irq_lock);
	if (irq_source_id < 0 ||
	    irq_source_id >= BITS_PER_LONG) {
		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
		goto unlock;
	}
	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
	/* Without an in-kernel irqchip there is no pin state to clear. */
	if (!irqchip_in_kernel(kvm))
		goto unlock;

	kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
#ifdef CONFIG_X86
	kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
#endif
unlock:
	mutex_unlock(&kvm->irq_lock);
}

/*
 * Register @kimn to be called (via kvm_fire_mask_notifiers()) whenever the
 * GSI @irq is masked or unmasked on an in-kernel irqchip.
 */
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

/*
 * Unregister a mask notifier.  The synchronize_srcu() guarantees no
 * concurrent kvm_fire_mask_notifiers() walker still references @kimn once
 * this function returns, so the caller may free it.
 */
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
}

/*
 * Notify registered mask notifiers that pin @pin of @irqchip was masked
 * (@mask == true) or unmasked.  The pin is translated to its GSI through
 * the routing table, both protected by irq_srcu.
 */
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
{
	struct kvm_irq_mask_notifier *kimn;
	int idx, gsi;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
	if (gsi != -1)	/* -1 means the pin is not routed to any GSI */
		hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

/*
 * Translate the userspace routing entry @ue into the kernel-internal
 * representation @e, and record the pin->GSI mapping in @rt.
 *
 * Returns 0 on success, -EINVAL for an unknown entry type, unknown irqchip,
 * or an out-of-range pin.
 */
int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;
	int delta;
	unsigned max_pin;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		delta = 0;
		switch (ue->u.irqchip.irqchip) {
		case KVM_IRQCHIP_PIC_MASTER:
			e->set = kvm_set_pic_irq;
			max_pin = PIC_NUM_PINS;
			break;
		case KVM_IRQCHIP_PIC_SLAVE:
			e->set = kvm_set_pic_irq;
			max_pin = PIC_NUM_PINS;
			/* Slave PIC pins occupy slots 8..15 of the PIC space. */
			delta = 8;
			break;
		case KVM_IRQCHIP_IOAPIC:
			max_pin = KVM_IOAPIC_NUM_PINS;
			e->set = kvm_set_ioapic_irq;
			break;
		default:
			goto out;
		}
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin + delta;
		if (e->irqchip.pin >= max_pin)
			goto out;
		rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
		break;
	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;
	default:
		goto out;
	}

	r = 0;
out:
	return r;
}

/*
 * Default identity routing table: GSI n maps to IOAPIC pin n, and on x86
 * the first 16 GSIs additionally map to the corresponding PIC pin.
 */
#define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
#  define PIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,	\
	  .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
#  define ROUTING_ENTRY2(irq) \
	IOAPIC_ROUTING_ENTRY(irq)
#endif

static const struct kvm_irq_routing_entry default_routing[] = {
	ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
	ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
	ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
	ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
	ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
	ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
	ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
	ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
	ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
	ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
	ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
	ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
	ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
	ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
	ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
	ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
	ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
	ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
	ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
	ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
	ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
	ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
	ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
	ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};

/* Install the identity GSI routing table above for a newly created VM. */
int kvm_setup_default_irq_routing(struct kvm *kvm)
{
	return kvm_set_irq_routing(kvm, default_routing,
				   ARRAY_SIZE(default_routing), 0);
}