#define pr_fmt(fmt)  "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @of_node: optional device-tree node of the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.  The caller is expected
 * to register the allocated irq_domain with irq_domain_register().  Returns a
 * pointer to the IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct irq_domain *domain;

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->of_node = of_node_get(of_node);
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;

	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	/*
	 * radix_tree_delete() takes care of destroying the root
	 * node when all entries are removed. Shout if there are
	 * any mappings left.
	 */
	WARN_ON(domain->revmap_tree.height);

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	of_node_put(domain->of_node);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);

/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain and, if first_irq is positive, also allocates
 * irq_descs and maps all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		irq_domain_associate_many(domain, first_irq, 0, size);
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
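
/*
 * Example (editor's sketch, not part of this file): a hypothetical driver
 * for a controller with 32 interrupt lines might register a simple domain
 * as below.  "foo_irq_map", "foo_chip", "node" and "priv" are made-up
 * names; the map() callback just installs the chip and a flow handler for
 * each hwirq as it gets mapped.
 *
 *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &foo_chip, handle_level_irq);
 *		return 0;
 *	}
 *
 *	static const struct irq_domain_ops foo_irq_ops = {
 *		.map	= foo_irq_map,
 *		.xlate	= irq_domain_xlate_onecell,
 *	};
 *
 *	domain = irq_domain_add_simple(node, 32, 0, &foo_irq_ops, priv);
 */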

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node, first_hwirq + size,
				  first_hwirq + size, 0, ops, host_data);
	if (!domain)
		return NULL;

	irq_domain_associate_many(domain, first_irq, first_hwirq, size);

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
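
/*
 * Example (editor's sketch, hypothetical numbers): a non-DT board with 16
 * pre-assigned interrupts starting at Linux IRQ 16 and hwirq 0 could use
 *
 *	domain = irq_domain_add_legacy(NULL, 16, 16, 0, &foo_irq_ops, priv);
 *
 * All 16 interrupts are associated, and map() is called for each one,
 * before this returns, as documented above.
 */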

/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
	struct irq_domain *h, *found = NULL;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
			rc = h->ops->match(h, node);
		else
			rc = (h->of_node != NULL) && (h->of_node == node);

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
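
/*
 * Example (editor's sketch): a platform with a single root interrupt
 * controller can nominate its domain once at init time, so that later
 * calls such as irq_create_mapping(NULL, hwirq) resolve against it:
 *
 *	irq_set_default_host(primary_domain);
 *
 * "primary_domain" is a placeholder for the domain returned by one of the
 * irq_domain_add_*() helpers above.
 */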

static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();

	irq_data->domain = NULL;
	irq_data->hwirq = 0;

	/* Clear reverse map for this hwirq */
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
	}
}

int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
				       domain->name, hwirq, virq, ret);
			}
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = virq;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);

void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int i;

	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		irq_domain_associate(domain, irq_base + i, hwirq_base + i);
	}
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
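
/*
 * Example (editor's sketch): an MSI-like controller that lets software
 * choose the hardware number can allocate a direct mapping and program the
 * returned number into the device, since hwirq == virq here:
 *
 *	virq = irq_create_direct_mapping(domain);
 *	if (virq)
 *		foo_write_vector(dev, virq);
 *
 * where foo_write_vector() is a made-up helper that tells the device which
 * number to raise.
 */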

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, irq_set_irq_type() should be
 * called on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	unsigned int hint;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		pr_warning("irq_create_mapping called for NULL domain, hwirq=%lx\n",
			   hwirq);
		WARN_ON(1);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	hint = hwirq % nr_irqs;
	if (hint == 0)
		hint++;
	virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
	if (virq <= 0)
		virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(domain->of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
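
/*
 * Example (editor's sketch): a driver that knows the hwirq number on its
 * parent controller can obtain (or create) the Linux irq and request it:
 *
 *	virq = irq_create_mapping(domain, hwirq);
 *	if (!virq)
 *		return -EINVAL;
 *	ret = request_irq(virq, foo_handler, 0, "foo", dev);
 *
 * "foo_handler", "foo" and "dev" are placeholders.  The mapping is undone
 * with irq_dispose_mapping(virq) after the irq has been freed again.
 */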

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert in to the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int ret;

	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(domain->of_node));
	if (unlikely(ret < 0))
		return ret;

	irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_domain *domain;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	domain = controller ? irq_find_host(controller) : irq_default_domain;
	if (!domain) {
#ifdef CONFIG_MIPS
		/*
		 * Workaround to avoid breaking interrupt controller drivers
		 * that don't yet register an irq_domain.  This is temporary
		 * code. ~~~gcl, Feb 24, 2012
		 *
		 * Scheduled for removal in Linux v3.6.  That should be enough
		 * time.
		 */
		if (intsize > 0)
			return intspec[0];
#endif
		pr_warning("no irq domain found for %s !\n",
			   of_node_full_name(controller));
		return 0;
	}

	/* If domain has no translation, then we assume interrupt line */
	if (domain->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (domain->ops->xlate(domain, controller, intspec, intsize,
				     &hwirq, &type))
			return 0;
	}

	/* Create mapping */
	virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	irq_domain_disassociate(domain, virq);
	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_get_irq_data(hwirq);
		if (data && (data->domain == domain) && (data->hwirq == hwirq))
			return hwirq;
	}

	return irq_linear_revmap(domain, hwirq);
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

/**
 * irq_linear_revmap() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a fast path that can be called directly by irq controller code to
 * save a handful of instructions.
 */
unsigned int irq_linear_revmap(struct irq_domain *domain,
			       irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Check revmap bounds; fall back to the radix tree if exceeded */
	if (hwirq >= domain->revmap_size) {
		rcu_read_lock();
		data = radix_tree_lookup(&domain->revmap_tree, hwirq);
		rcu_read_unlock();
		return data ? data->irq : 0;
	}

	return domain->linear_revmap[hwirq];
}
EXPORT_SYMBOL_GPL(irq_linear_revmap);
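
/*
 * Example (editor's sketch, hypothetical register layout): a chained flow
 * handler typically reads a pending register, converts each hwirq with
 * irq_linear_revmap() and hands the result to the core:
 *
 *	static void foo_irq_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		u32 pending = readl(foo_base + FOO_PENDING);
 *
 *		while (pending) {
 *			irq_hw_number_t hwirq = __ffs(pending);
 *
 *			generic_handle_irq(irq_linear_revmap(foo_domain, hwirq));
 *			pending &= ~BIT(hwirq);
 *		}
 *	}
 *
 * foo_base, FOO_PENDING and foo_domain are made-up driver state.
 */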

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	struct irq_domain *domain;
	struct radix_tree_iter iter;
	void *data, **slot;
	int i;

	seq_printf(m, " %-16s  %-6s  %-10s  %-10s  %s\n",
		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(domain, &irq_domain_list, link) {
		int count = 0;
		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
			count++;
		seq_printf(m, "%c%-16s  %6u  %10u  %10u  %s\n",
			   domain == irq_default_domain ? '*' : ' ', domain->name,
			   domain->revmap_size + count, domain->revmap_size,
			   domain->revmap_direct_max_irq,
			   domain->of_node ? of_node_full_name(domain->of_node) : "");
	}
	mutex_unlock(&irq_domain_mutex);

	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %6s  %-14s  %s\n", "irq", "hwirq",
		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		      "active", "type", "domain");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);
		domain = desc->irq_data.domain;

		if (domain) {
			struct irq_chip *chip;
			int hwirq = desc->irq_data.hwirq;
			bool direct;

			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05x  ", hwirq);

			chip = irq_desc_get_chip(desc);
			seq_printf(m, "%-15s  ", (chip && chip->name) ? chip->name : "none");

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p  " : "  %p  ", data);

			seq_printf(m, "   %c    ", (desc->action && desc->action->handler) ? '*' : ' ');
			direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
			seq_printf(m, "%6s%-8s  ",
				   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
				   direct ? "(DIRECT)" : "");
			seq_printf(m, "%s\n", desc->irq_data.domain->name);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			const u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
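
/*
 * Example (editor's sketch): a controller whose device-tree binding uses
 * #interrupt-cells = <2> (hwirq number followed by trigger flags) can plug
 * this helper straight into its ops; "foo_irq_map" is a made-up callback:
 *
 *	static const struct irq_domain_ops foo_irq_ops = {
 *		.map	= foo_irq_map,
 *		.xlate	= irq_domain_xlate_twocell,
 *	};
 */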

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings.  For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);