#define pr_fmt(fmt)  "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @of_node: optional device-tree node of the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct irq_domain *domain;

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->of_node = of_node_get(of_node);
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;

	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	/*
	 * radix_tree_delete() takes care of destroying the root
	 * node when all entries are removed. Shout if there are
	 * any mappings left.
	 */
	WARN_ON(domain->revmap_tree.height);

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	of_node_put(domain->of_node);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);

/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
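
/*
 * Usage sketch (illustrative only, not part of this file): how a hypothetical
 * controller driver might register a simple linear domain. All foo_* names
 * and FOO_NR_IRQS are made-up assumptions for this example; a real driver
 * would also fill in the irq_chip mask/unmask/ack callbacks.
 */
#define FOO_NR_IRQS	32	/* illustrative: number of hwirqs on the block */

static struct irq_chip foo_irq_chip = {
	.name = "FOO",
};

static int foo_irq_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hwirq)
{
	/* called for every new hwirq->virq association in this domain */
	irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static const struct irq_domain_ops foo_irq_ops = {
	.map	= foo_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

static struct irq_domain *foo_setup_domain(struct device_node *node, void *priv)
{
	/* first_irq == 0: virqs are allocated on the fly as hwirqs get mapped */
	return irq_domain_add_simple(node, FOO_NR_IRQS, 0, &foo_irq_ops, priv);
}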

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node, first_hwirq + size,
				  first_hwirq + size, 0, ops, host_data);
	if (!domain)
		return NULL;

	irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
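
/*
 * Usage sketch (illustrative only, not part of this file): a non-DT board
 * whose Linux irq numbers are fixed ahead of time. FOO_IRQ_BASE is a made-up
 * constant and foo_irq_ops is the made-up ops structure from the sketch
 * above. Here the 16 hwirqs 0..15 are associated with virqs
 * FOO_IRQ_BASE..FOO_IRQ_BASE+15, with map() running for each mapping.
 */
#define FOO_IRQ_BASE	64	/* illustrative: statically reserved virq base */

static struct irq_domain *foo_setup_legacy_domain(struct device_node *node,
						  void *priv)
{
	return irq_domain_add_legacy(node, 16, FOO_IRQ_BASE, 0,
				     &foo_irq_ops, priv);
}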

/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
	struct irq_domain *h, *found = NULL;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
			rc = h->ops->match(h, node);
		else
			rc = (h->of_node != NULL) && (h->of_node == node);

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;

	/* Clear reverse map for this hwirq */
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
	}
}

int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
				       domain->name, hwirq, virq, ret);
			}
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = virq;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);

void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int i;

	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
	}
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
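
/*
 * Usage sketch (illustrative only, not part of this file): a controller that
 * can be told which number to raise lets the virq double as the hwirq.
 * foo_program_vector() is a made-up hardware hook for this example, and the
 * error value is just an illustration.
 */
static void foo_program_vector(unsigned int vector);	/* hypothetical hw setup */

static int foo_alloc_event(struct irq_domain *domain)
{
	unsigned int virq = irq_create_direct_mapping(domain);

	if (!virq)
		return -ENOSPC;

	foo_program_vector(virq);	/* tell the hardware to signal this number */
	return virq;
}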

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	unsigned int hint;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	hint = hwirq % nr_irqs;
	if (hint == 0)
		hint++;
	virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
	if (virq <= 0)
		virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(domain->of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
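
/*
 * Usage sketch (illustrative only, not part of this file): mapping one hwirq
 * on demand and configuring its trigger before requesting it. The foo_* names
 * are made up and the error value picked here is just an example.
 */
static int foo_request_hwirq(struct irq_domain *domain, irq_hw_number_t hwirq,
			     irq_handler_t handler, void *dev)
{
	unsigned int virq = irq_create_mapping(domain, hwirq);

	if (!virq)
		return -ENOMEM;	/* no virq available or association failed */

	irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);
	return request_irq(virq, handler, 0, "foo-dev", dev);
}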

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert in to the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int ret;

	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(domain->of_node));
	if (unlikely(ret < 0))
		return ret;

	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
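
/*
 * Usage sketch (illustrative only, not part of this file): inserting an
 * already reserved block of Linux irqs into an existing domain, e.g. hwirqs
 * 16..23 landing on virqs FOO_IRQ_BASE..FOO_IRQ_BASE+7. FOO_IRQ_BASE is the
 * made-up constant from the earlier sketch.
 */
static int foo_insert_static_range(struct irq_domain *domain)
{
	return irq_create_strict_mappings(domain, FOO_IRQ_BASE, 16, 8);
}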

unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
{
	struct irq_domain *domain;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain;
	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(irq_data->np));
		return 0;
	}

	/* If domain has no translation, then we assume interrupt line */
	if (domain->ops->xlate == NULL)
		hwirq = irq_data->args[0];
	else {
		if (domain->ops->xlate(domain, irq_data->np, irq_data->args,
					irq_data->args_count, &hwirq, &type))
			return 0;
	}

	/* Create mapping */
	virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != irq_get_trigger_type(virq))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	irq_domain_disassociate(domain, virq);
	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_get_irq_data(hwirq);
		if (data && (data->domain == domain) && (data->hwirq == hwirq))
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
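
/*
 * Usage sketch (illustrative only, not part of this file): a chained handler
 * for a hypothetical secondary controller translating pending hwirq bits back
 * into Linux irqs. foo_domain and foo_read_pending() are made-up names,
 * FOO_NR_IRQS comes from the first sketch, and a real driver would also
 * ack/mask the parent via chained_irq_enter()/chained_irq_exit().
 */
static struct irq_domain *foo_domain;		/* set up at probe time */
static unsigned long foo_read_pending(void);	/* hypothetical status read */

static void foo_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = foo_read_pending();
	int hwirq;

	for_each_set_bit(hwirq, &pending, FOO_NR_IRQS)
		generic_handle_irq(irq_find_mapping(foo_domain, hwirq));
}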

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	struct irq_domain *domain;
	struct radix_tree_iter iter;
	void *data, **slot;
	int i;

	seq_printf(m, " %-16s  %-6s  %-10s  %-10s  %s\n",
		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(domain, &irq_domain_list, link) {
		int count = 0;
		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
			count++;
		seq_printf(m, "%c%-16s  %6u  %10u  %10u  %s\n",
			   domain == irq_default_domain ? '*' : ' ', domain->name,
			   domain->revmap_size + count, domain->revmap_size,
			   domain->revmap_direct_max_irq,
			   domain->of_node ? of_node_full_name(domain->of_node) : "");
	}
	mutex_unlock(&irq_domain_mutex);

	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %6s  %-14s  %s\n", "irq", "hwirq",
		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		      "active", "type", "domain");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);
		domain = desc->irq_data.domain;

		if (domain) {
			struct irq_chip *chip;
			int hwirq = desc->irq_data.hwirq;
			bool direct;

			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05x  ", hwirq);

			chip = irq_desc_get_chip(desc);
			seq_printf(m, "%-15s  ", (chip && chip->name) ? chip->name : "none");

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p  " : "  %p  ", data);

			seq_printf(m, "   %c    ", (desc->action && desc->action->handler) ? '*' : ' ');
			direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
			seq_printf(m, "%6s%-8s  ",
				   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
				   direct ? "(DIRECT)" : "");
			seq_printf(m, "%s\n", desc->irq_data.domain->name);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			const u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings.  For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
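
/*
 * Usage sketch (illustrative only, not part of this file): a controller whose
 * device-tree binding uses two cells (hwirq number + trigger flags) can plug
 * the generic translator straight into its ops. foo_irq_map is the made-up
 * map() callback from the earlier sketch.
 */
static const struct irq_domain_ops foo_two_cell_ops = {
	.map	= foo_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};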

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);