/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
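
/*
 * For illustration (not part of the original file): booting with
 * "irqaffinity=0-3" limits the default irq affinity mask to CPUs 0-3;
 * the boot CPU is always added back, as noted above.
 */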

static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!irq_default_affinity)
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	int cpu, irq = desc->irq_data.irq;
	ssize_t ret = 0;
	char *p = "";

	for_each_possible_cpu(cpu) {
		unsigned int c = kstat_irqs_cpu(irq, cpu);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(type);

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for (action = desc->action; action != NULL; action = action->next) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= irq_attrs,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
	}
}

static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);
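
/*
 * Illustrative example (not part of the original file): the attributes
 * defined above show up under /sys/kernel/irq/<N>/. A hypothetical
 * session might look like:
 *
 *	# cat /sys/kernel/irq/9/chip_name
 *	IO-APIC
 *	# cat /sys/kernel/irq/9/actions
 *	acpi
 *	# cat /sys/kernel/irq/9/per_cpu_count
 *	0,153,0,0
 */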

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once we have deleted the descriptor from
	 * the sparse tree we can free it; lookups from proc will then
	 * fail to find the descriptor.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */
	mutex_lock(&sparse_irq_lock);
	kobject_del(&desc->kobj);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplexing interrupts to do RCU-based management
	 * of the child interrupts.
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct cpumask *affinity, struct module *owner)
{
	const struct cpumask *mask = NULL;
	struct irq_desc *desc;
	unsigned int flags;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0, mask = affinity; i < cnt; i++, mask++) {
			if (cpumask_empty(mask))
				return -EINVAL;
		}
	}

	flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
	mask = NULL;

	for (i = 0; i < cnt; i++) {
		if (affinity) {
			node = cpu_to_node(cpumask_first(affinity));
			mask = affinity;
			affinity++;
		}
		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct cpumask *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
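
/*
 * Illustrative sketch (not part of the original file): a demultiplexing
 * driver might invoke generic_handle_irq() for each pending child from
 * its parent handler. "struct my_chip" and its members are hypothetical.
 */
#if 0	/* example only */
static irqreturn_t my_demux_handler(int irq, void *data)
{
	struct my_chip *chip = data;
	unsigned long pending = readl(chip->status_reg);
	int bit;

	/* Run the flow handler of each pending child interrupt */
	for_each_set_bit(bit, &pending, chip->nr_children)
		generic_handle_irq(chip->child_irq_base + bit);

	return IRQ_HANDLED;
}
#endif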

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif
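
/*
 * Illustrative sketch (not part of the original file): architecture
 * low-level entry code normally reaches __handle_domain_irq() via the
 * handle_domain_irq() wrapper, roughly like this (the handler name,
 * "intc_base", INTC_PENDING and "my_intc_domain" are hypothetical):
 *
 *	asmlinkage void my_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = readl_relaxed(intc_base + INTC_PENDING);
 *
 *		handle_domain_irq(my_intc_domain, hwirq, regs);
 *	}
 */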

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask array of size @cnt which
 *		hints where the irq descriptors should be allocated and which
 *		default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct cpumask *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, affinity, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
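
/*
 * Illustrative sketch (not part of the original file): callers normally
 * use the irq_alloc_descs()/irq_alloc_desc() wrappers from <linux/irq.h>,
 * which pass THIS_MODULE and a NULL affinity mask, e.g.:
 *
 *	int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 4);
 */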

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif
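
/*
 * Illustrative sketch (not part of the original file): under
 * CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ the helpers above are used as
 * a pair, e.g.:
 *
 *	unsigned int irq = irq_alloc_hwirqs(1, numa_node_id());
 *
 *	if (!irq)
 *		return -ENOSPC;
 *	...
 *	irq_free_hwirqs(irq, 1);
 */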

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns the next irq number after @offset, or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
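
/*
 * Illustrative sketch (not part of the original file): walking all
 * currently allocated irq numbers with irq_get_next_irq():
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		pr_info("irq %u is allocated\n", irq);
 */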

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
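
/*
 * Illustrative sketch (not part of the original file): this lock/unlock
 * pair is typically used through the irq_get_desc_buslock() style
 * helpers in internals.h, following the pattern:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = __irq_get_desc_lock(irq, &flags, true, 0);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	... operate on desc with desc->lock held ...
 *	__irq_put_desc_unlock(desc, flags, true);
 */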

int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}
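
/*
 * Illustrative sketch (not part of the original file): a per-CPU
 * interrupt such as a local timer is typically marked and requested
 * like this (the handler and the "my_evt" percpu cookie are
 * hypothetical):
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, my_timer_handler, "my_timer",
 *				 my_evt);
 */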

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	unsigned int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}
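
/*
 * Illustrative note (not part of the original file): /proc/interrupts
 * and /proc/stat use this accessor from preemptible context, e.g.:
 *
 *	unsigned int count = kstat_irqs_usr(irq);
 *
 * Callers which already hold the sparse irq lock, or which otherwise
 * know the descriptor cannot go away, can call kstat_irqs() directly.
 */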