/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot CPU. We don't want to end up with
	 * bug reports caused by random command line masks
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);

static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!irq_default_affinity)
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
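
/*
 * For reference (not kernel-doc): the default affinity set up above can
 * be narrowed from the kernel command line, e.g. booting with
 *
 *	irqaffinity=0-3
 *
 * restricts irq_default_affinity to CPUs 0-3. The boot CPU is always
 * added back by irq_affinity_setup() to guard against bogus masks.
 */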

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once we have deleted the descriptor from
	 * the sparse tree we can free it; lookups from proc will then
	 * fail to find it.
	 */
	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplexing interrupts to do RCU-based management
	 * of the child interrupts.
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let the arch update nr_irqs and return the number of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
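
/*
 * Usage sketch (illustrative only, not part of this file): a
 * demultiplexing driver can hand a child interrupt it has decoded from
 * its hardware off to the core. The chip accessors below are
 * hypothetical stand-ins:
 *
 *	static irqreturn_t demux_handler(int irq, void *data)
 *	{
 *		struct demux_chip *chip = data;
 *		unsigned int child = demux_chip_pending(chip);
 *
 *		if (generic_handle_irq(child))
 *			return IRQ_NONE;
 *		return IRQ_HANDLED;
 *	}
 */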

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware randomly delivers wrong interrupt numbers.
	 * Rather than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif
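
/*
 * Usage sketch (illustrative only): a root interrupt controller's
 * low-level entry point typically reaches the function above through
 * the handle_domain_irq() wrapper, which passes lookup == true. The
 * register read and identifiers are hypothetical:
 *
 *	static void my_ic_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = readl_relaxed(my_ic_base + MY_IC_PENDING);
 *
 *		handle_domain_irq(my_ic_domain, hwirq, regs);
 *	}
 */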

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
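
/*
 * Usage sketch (illustrative only): callers normally go through the
 * irq_alloc_descs()/irq_alloc_desc() wrapper macros, which supply
 * THIS_MODULE as @owner. E.g. four dynamically placed descriptors:
 *
 *	int base = irq_alloc_descs(-1, 0, 4, NUMA_NO_NODE);
 *
 *	if (base < 0)
 *		return base;	// -EINVAL, -EEXIST or -ENOMEM
 *	...
 *	irq_free_descs(base, 4);
 */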

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate irq descriptors and initialize the hardware
 * @cnt:	Number of interrupts to allocate
 * @node:	Node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptors and clean up the hardware
 * @from:	First irq number to free
 * @cnt:	Number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif
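
/*
 * Usage sketch (illustrative only): with
 * CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ enabled, arch code can use the
 * pair above like this. Note the failure value is 0, not an errno:
 *
 *	unsigned int irq = irq_alloc_hwirqs(2, numa_node_id());
 *
 *	if (!irq)
 *		return -ENOMEM;
 *	...
 *	irq_free_hwirqs(irq, 2);
 */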

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
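
/*
 * Usage sketch (illustrative only): this is the building block behind
 * the for_each_active_irq() iterator, which walks every allocated irq:
 *
 *	unsigned int irq;
 *
 *	for_each_active_irq(irq)
 *		pr_info("irq %u is allocated\n", irq);
 */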

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
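
/*
 * Usage sketch (illustrative only): irq core code pairs the two helpers
 * above through the irq_get_desc_lock()/irq_put_desc_unlock() macros
 * from internals.h:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	// ... modify desc under desc->lock ...
 *	irq_put_desc_unlock(desc, flags);
 */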

int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}
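
/*
 * Usage sketch (illustrative only): a per-CPU interrupt such as a local
 * timer is marked this way before being requested with the per-CPU API.
 * The handler and per-CPU data below are hypothetical:
 *
 *	static DEFINE_PER_CPU(struct my_timer, my_timers);
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, my_timer_handler, "my_timer",
 *				 &my_timers);
 */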

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	unsigned int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}
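
/*
 * Usage sketch (illustrative only): a /proc style consumer running in
 * preemptible context uses the _usr variant so the descriptor cannot be
 * freed underneath it:
 *
 *	unsigned int count = kstat_irqs_usr(irq);
 *
 *	seq_printf(m, "irq %u fired %u times\n", irq, count);
 */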