/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}
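
/*
 * Editorial note (not part of the upstream file): irq_lock_sparse() and
 * irq_unlock_sparse() are taken by the /proc side, i.e. show_interrupts()
 * and kstat_irqs_usr(), so that an interrupt descriptor cannot be freed
 * while it is being inspected; free_desc() below takes the same mutex
 * before removing the descriptor from the radix tree.
 */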

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once we have deleted the descriptor from the
	 * sparse tree we can free it; accesses from /proc will then fail
	 * to look up the descriptor.
	 */
	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
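
/*
 * Usage sketch (editorial, not part of the upstream file): a typical
 * caller is a chained flow handler that demultiplexes a cascaded
 * interrupt controller and hands each resolved Linux irq number to
 * generic_handle_irq(). The foo_priv structure, the FOO_INT_STATUS
 * register offset and the handler below are hypothetical.
 *
 *	static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		struct foo_priv *priv = irq_desc_get_handler_data(desc);
 *		u32 pending = readl(priv->base + FOO_INT_STATUS);
 *
 *		while (pending) {
 *			unsigned int bit = __ffs(pending);
 *
 *			generic_handle_irq(irq_find_mapping(priv->domain, bit));
 *			pending &= ~BIT(bit);
 *		}
 *	}
 */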

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif
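
/*
 * Usage sketch (editorial, not part of the upstream file): architecture
 * entry code normally reaches __handle_domain_irq() through the
 * handle_domain_irq() wrapper from <linux/irqdesc.h>, passing the
 * hardware irq number read from the interrupt controller together with
 * the saved register file. The controller base, register offset and
 * domain pointer below are hypothetical.
 *
 *	asmlinkage void foo_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = readl_relaxed(foo_intc_base + FOO_INTC_PENDING);
 *
 *		handle_domain_irq(foo_root_domain, hwirq, regs);
 *	}
 */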

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or a negative error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
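
/*
 * Usage sketch (editorial, not part of the upstream file): most callers
 * use the irq_alloc_descs()/irq_alloc_descs_from() convenience macros
 * from <linux/irq.h> rather than calling __irq_alloc_descs() directly.
 * A driver that needs a block of dynamically numbered interrupts might
 * do something like the following; the count of 8 is illustrative only.
 *
 *	int base = irq_alloc_descs(-1, 0, 8, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 8);
 */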

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 *
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif
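
/*
 * Usage sketch (editorial, not part of the upstream file): on the few
 * architectures that select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ, callers pair
 * the two helpers above roughly as follows; the count of 2 and the
 * device pointer are illustrative only.
 *
 *	unsigned int irq = irq_alloc_hwirqs(2, dev_to_node(dev));
 *
 *	if (!irq)
 *		return -ENOSPC;
 *	...
 *	irq_free_hwirqs(irq, 2);
 */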

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
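
/*
 * Usage sketch (editorial, not part of the upstream file): code that
 * needs to walk the sparse irq space advances through
 * irq_get_next_irq() until it returns nr_irqs; do_something() below is
 * hypothetical.
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		do_something(irq);
 */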

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
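
/*
 * Usage sketch (editorial, not part of the upstream file): the two
 * helpers above are normally used through the irq_get_desc_buslock()/
 * irq_put_desc_busunlock() and irq_get_desc_lock()/irq_put_desc_unlock()
 * wrappers in kernel/irq/internals.h, and callers pair them like this:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	...	operate on desc with desc->lock held
 *	irq_put_desc_busunlock(desc, flags);
 */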

int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
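
/*
 * Usage sketch (editorial, not part of the upstream file): a per-cpu
 * interrupt (e.g. a GIC PPI or an arch timer irq) is marked with
 * irq_set_percpu_devid(), typically by the irqchip or platform code,
 * and is then requested with the per-cpu request API. The handler and
 * per-cpu device pointer below are hypothetical.
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, foo_percpu_handler, "foo", foo_pcpu_dev);
 */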

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	int sum;

	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}
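
/*
 * Usage sketch (editorial, not part of the upstream file): readers that
 * run in preemptible context and cannot guarantee that the descriptor
 * is not freed concurrently, such as the /proc/stat code, use
 * kstat_irqs_usr() instead of kstat_irqs():
 *
 *	unsigned int j, sum = 0;
 *
 *	for_each_irq_nr(j)
 *		sum += kstat_irqs_usr(j);
 */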