// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
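
/*
 * Example: booting with "irqaffinity=0-3" restricts the default affinity
 * of all interrupts to CPUs 0-3. The boot CPU is always added as a
 * safety net against bogus command line masks.
 */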

static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
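/*
 * Allocate the cpumasks embedded in an irq descriptor: the affinity mask
 * and, depending on the configuration, the effective affinity and pending
 * masks, all on the given NUMA node. Unwinds prior allocations on failure.
 */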
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

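/*
 * Reset an irq descriptor to its default state: disabled and masked,
 * handled by handle_bad_irq(), backed by no_irq_chip, with all per cpu
 * statistics cleared.
 */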
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->tot_count = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

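/*
 * Sysfs interface: each allocated descriptor is exposed under
 * /sys/kernel/irq/<irq> with the read-only attributes per_cpu_count,
 * chip_name, hwirq, type, wakeup, name and actions.
 */
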
static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	int cpu, irq = desc->irq_data.irq;
	ssize_t ret = 0;
	char *p = "";

	for_each_possible_cpu(cpu) {
		unsigned int c = kstat_irqs_cpu(irq, cpu);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);
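
/*
 * Example: on a system with four possible CPUs, reading
 * /sys/kernel/irq/19/per_cpu_count might yield "120,0,46,0" - one comma
 * separated count per possible CPU.
 */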

static ssize_t chip_name_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;

}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
	raw_spin_unlock_irq(&desc->lock);

	return ret;

}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for (action = desc->action; action != NULL; action = action->next) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&wakeup_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= irq_attrs,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
	}
}

static void irq_sysfs_del(struct irq_desc *desc)
{
	/*
	 * If irq_sysfs_init() has not yet been invoked (early boot), then
	 * irq_kobj_base is NULL and the descriptor was never added.
	 * kobject_del() complains about an object with no parent, so make
	 * it conditional.
	 */
	if (irq_kobj_base)
		kobject_del(&desc->kobj);
}

static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

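/*
 * irq_lock_sparse()/irq_unlock_sparse() serialize descriptor allocation
 * and free against walkers such as show_interrupts() and kstat_irqs_usr().
 */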
void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

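/*
 * Descriptor teardown is a two step process: free_desc() unlinks the
 * descriptor, then delayed_free_desc() runs after an RCU grace period
 * and drops the final kobject reference, which invokes irq_kobj_release()
 * to free the masks, the statistics and the descriptor itself.
 */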
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock protects also show_interrupts() and
	 * kstat_irqs_usr(). Once we deleted the descriptor from the
	 * sparse tree we can free it. Access in proc will fail to
	 * lookup the descriptor.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */
	irq_sysfs_del(desc);
	delete_irq_desc(irq);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplex interrupts to do rcu based management of
	 * the child interrupts.
	 * This also allows us to use rcu in kstat_irqs_usr().
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct cpumask *affinity, struct module *owner)
{
	const struct cpumask *mask = NULL;
	struct irq_desc *desc;
	unsigned int flags;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0, mask = affinity; i < cnt; i++, mask++) {
			if (cpumask_empty(mask))
				return -EINVAL;
		}
	}

	flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
	mask = NULL;

	for (i = 0; i < cnt; i++) {
		if (affinity) {
			node = cpu_to_node(cpumask_first(affinity));
			mask = affinity;
			affinity++;
		}
		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		mutex_init(&desc[i].request_mutex);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct cpumask *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	mutex_lock(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask array of size @cnt which
 *		hints where the irq descriptors should be allocated and which
 *		default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct cpumask *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto unlock;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto unlock;
	}
	ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);

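/*
 * Example (hypothetical): a driver needing eight consecutive interrupt
 * numbers on the local node could use:
 *
 *	int irq = __irq_alloc_descs(-1, 0, 8, numa_node_id(), THIS_MODULE, NULL);
 *
 * Most callers use the irq_alloc_descs()/irq_alloc_desc() convenience
 * macros from <linux/irq.h> instead.
 */
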
#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0 or 0, if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 *
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

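/*
 * Look up the descriptor for @irq and lock it. With _IRQ_DESC_CHECK set
 * in @check the lookup fails unless the percpu-devid property of the
 * descriptor matches the request. @bus additionally takes the irq chip
 * bus lock.
 */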
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

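/*
 * Mark an interrupt as percpu-devid and record the subset of CPUs on
 * which it can be requested; a NULL @affinity defaults to
 * cpu_possible_mask.
 */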
int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

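/*
 * Note: desc->tot_count, used by the fast path in kstat_irqs() below, is
 * maintained by kstat_incr_irqs_this_cpu(). Per cpu interrupts are
 * excluded from the fast path because their concurrent increments on
 * multiple CPUs would make tot_count unreliable.
 */
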
/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;
	if (!irq_settings_is_per_cpu_devid(desc) &&
	    !irq_settings_is_per_cpu(desc))
		return desc->tot_count;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 * Contrary to kstat_irqs() this can be called from any context.
 * It uses rcu since a concurrent removal of an interrupt descriptor is
 * observing an rcu grace period before delayed_free_desc()/irq_kobj_release().
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	rcu_read_lock();
	sum = kstat_irqs(irq);
	rcu_read_unlock();
	return sum;
}