#define pr_fmt(fmt)  "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
				 * ie. legacy 8259, gets irqs 1..15 */
#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

/**
32
 * irq_domain_alloc() - Allocate a new irq_domain data structure
33 34
 * @of_node: optional device-tree node of the interrupt controller
 * @revmap_type: type of reverse mapping to use
35
 * @ops: map/unmap domain callbacks
36
 * @host_data: Controller private data pointer
37
 *
38 39 40
 * Allocates and initialize and irq_domain structure.  Caller is expected to
 * register allocated irq_domain with irq_domain_register().  Returns pointer
 * to IRQ domain, or NULL on failure.
41
 */
42 43
static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
					   unsigned int revmap_type,
44
					   const struct irq_domain_ops *ops,
45
					   void *host_data)
46
{
47
	struct irq_domain *domain;
48

P
Paul Mundt 已提交
49 50
	domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
			      of_node_to_nid(of_node));
51
	if (WARN_ON(!domain))
52 53 54
		return NULL;

	/* Fill structure */
55 56
	domain->revmap_type = revmap_type;
	domain->ops = ops;
57
	domain->host_data = host_data;
58
	domain->of_node = of_node_get(of_node);
59

60 61 62
	return domain;
}

63 64 65 66 67 68
/* Release a domain built by irq_domain_alloc(): drop the of_node
 * reference taken there, then free the structure itself. */
static void irq_domain_free(struct irq_domain *domain)
{
	of_node_put(domain->of_node);
	kfree(domain);
}

69 70 71 72 73
/* Publish a fully-initialised domain on the global domain list. */
static void irq_domain_add(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Allocated domain of type %d @0x%p\n",
		 domain->revmap_type, domain);
}

78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123
/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	/* Tear down the reverse-map storage for this domain's type */
	switch (domain->revmap_type) {
	case IRQ_DOMAIN_MAP_LEGACY:
		/*
		 * Legacy domains don't manage their own irq_desc
		 * allocations, we expect the caller to handle irq_desc
		 * freeing on their own.
		 */
		break;
	case IRQ_DOMAIN_MAP_TREE:
		/*
		 * radix_tree_delete() takes care of destroying the root
		 * node when all entries are removed. Shout if there are
		 * any mappings left.
		 */
		WARN_ON(domain->revmap_data.tree.height);
		break;
	case IRQ_DOMAIN_MAP_LINEAR:
		kfree(domain->revmap_data.linear.revmap);
		domain->revmap_data.linear.size = 0;
		break;
	case IRQ_DOMAIN_MAP_NOMAP:
		break;
	}

	list_del(&domain->link);

	/* If the going away domain is the default one, reset it. */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain of type %d @0x%p\n",
		 domain->revmap_type, domain);

	irq_domain_free(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
130

131 132 133 134 135 136 137 138 139 140 141
/* Translate a hwirq in a legacy domain to its pre-allocated Linux irq.
 * Returns 0 (the invalid irq) when hwirq lies outside the legacy range. */
static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
					     irq_hw_number_t hwirq)
{
	irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
	int size = domain->revmap_data.legacy.size;

	/* Bounds-check against the fixed range claimed at domain creation */
	if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
		return 0;
	return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
}

142 143 144 145 146 147 148 149 150
/**
 * irq_domain_add_simple() - Allocate and register a simple irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates a legacy irq_domain if irq_base is positive or a linear
151 152
 * domain otherwise. For the legacy domain, IRQ descriptors will also
 * be allocated.
153 154 155 156 157 158 159 160 161 162 163 164 165
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers which is that a linear mapping should
 * normally be used unless the system requires a legacy mapping in
 * order to support supplying interrupt numbers during non-DT
 * registration of devices.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187
	if (first_irq > 0) {
		int irq_base;

		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/*
			 * Set the descriptor allocator to search for a
			 * 1-to-1 mapping, such as irq_alloc_desc_at().
			 * Use of_node_to_nid() which is defined to
			 * numa_node_id() on platforms that have no custom
			 * implementation.
			 */
			irq_base = irq_alloc_descs(first_irq, first_irq, size,
						   of_node_to_nid(of_node));
			if (irq_base < 0) {
				WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
				     first_irq);
				irq_base = first_irq;
			}
		} else
			irq_base = first_irq;

		return irq_domain_add_legacy(of_node, size, irq_base, 0,
188
					     ops, host_data);
189 190 191 192
	}

	/* A linear domain is the default */
	return irq_domain_add_linear(of_node, size, ops, host_data);
193 194
}

195 196 197
/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;
	unsigned int i;

	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
	if (!domain)
		return NULL;

	domain->revmap_data.legacy.first_irq = first_irq;
	domain->revmap_data.legacy.first_hwirq = first_hwirq;
	domain->revmap_data.legacy.size = size;

	mutex_lock(&irq_domain_mutex);
	/* Verify that all the irqs are available */
	for (i = 0; i < size; i++) {
		int irq = first_irq + i;
		struct irq_data *irq_data = irq_get_irq_data(irq);

		if (WARN_ON(!irq_data || irq_data->domain)) {
			mutex_unlock(&irq_domain_mutex);
			irq_domain_free(domain);
			return NULL;
		}
	}

	/* Claim all of the irqs before registering a legacy domain */
	for (i = 0; i < size; i++) {
		struct irq_data *irq_data = irq_get_irq_data(first_irq + i);

		irq_data->hwirq = first_hwirq + i;
		irq_data->domain = domain;
	}
	mutex_unlock(&irq_domain_mutex);

	/* Call the controller's map() hook for every claimed irq */
	for (i = 0; i < size; i++) {
		int irq = first_irq + i;
		int hwirq = first_hwirq + i;

		/* IRQ0 gets ignored */
		if (!irq)
			continue;

		/* Legacy flags are left to default at this point,
		 * one can then use irq_create_mapping() to
		 * explicitly change them
		 */
		if (ops->map)
			ops->map(domain, irq, hwirq);

		/* Clear norequest flags */
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	irq_domain_add(domain);
	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
272 273

/**
274
 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
275
 * @of_node: pointer to interrupt controller's device tree node.
276
 * @size: Number of interrupts in the domain.
277 278 279 280 281
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 */
struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
					 unsigned int size,
282
					 const struct irq_domain_ops *ops,
283 284 285 286
					 void *host_data)
{
	struct irq_domain *domain;
	unsigned int *revmap;
287

P
Paul Mundt 已提交
288 289
	revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
			      of_node_to_nid(of_node));
290 291
	if (WARN_ON(!revmap))
		return NULL;
292

293 294 295 296 297 298 299 300 301 302
	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
	if (!domain) {
		kfree(revmap);
		return NULL;
	}
	domain->revmap_data.linear.size = size;
	domain->revmap_data.linear.revmap = revmap;
	irq_domain_add(domain);
	return domain;
}
303
EXPORT_SYMBOL_GPL(irq_domain_add_linear);
304 305

struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
306
					 unsigned int max_irq,
307
					 const struct irq_domain_ops *ops,
308 309 310 311
					 void *host_data)
{
	struct irq_domain *domain = irq_domain_alloc(of_node,
					IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
312 313
	if (domain) {
		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
314
		irq_domain_add(domain);
315
	}
316 317
	return domain;
}
318
EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
319 320 321 322 323 324 325 326 327 328

/**
 * irq_domain_add_tree()
 * @of_node: pointer to interrupt controller's device tree node.
 * @ops: map/unmap domain callbacks
 *
 * Note: The radix tree will be allocated later during boot automatically
 * (the reverse mapping will use the slow path until that happens).
 */
struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
329
					 const struct irq_domain_ops *ops,
330 331 332 333 334 335 336 337
					 void *host_data)
{
	struct irq_domain *domain = irq_domain_alloc(of_node,
					IRQ_DOMAIN_MAP_TREE, ops, host_data);
	if (domain) {
		INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
		irq_domain_add(domain);
	}
338
	return domain;
339
}
340
EXPORT_SYMBOL_GPL(irq_domain_add_tree);
341 342 343 344 345 346 347 348

/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
	struct irq_domain *h, *found = NULL;
349
	int rc;
350 351 352 353 354 355 356

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	mutex_lock(&irq_domain_mutex);
357 358 359 360 361 362 363
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
			rc = h->ops->match(h, node);
		else
			rc = (h->of_node != NULL) && (h->of_node == node);

		if (rc) {
364 365 366
			found = h;
			break;
		}
367
	}
368 369 370 371 372 373 374
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

/**
 * irq_set_default_host() - Set a "default" irq domain
375
 * @domain: default domain pointer
376 377 378 379 380 381
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
382
void irq_set_default_host(struct irq_domain *domain)
383
{
384
	pr_debug("Default domain set to @0x%p\n", domain);
385

386
	irq_default_domain = domain;
387
}
388
EXPORT_SYMBOL_GPL(irq_set_default_host);
389

390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435
/*
 * Undo the associations made by irq_domain_associate_many() for
 * @count irqs starting at @irq_base: call the domain's unmap() hook,
 * detach each irq_data from the domain and clear the reverse map.
 */
static void irq_domain_disassociate_many(struct irq_domain *domain,
					 unsigned int irq_base, int count)
{
	/*
	 * disassociate in reverse order;
	 * not strictly necessary, but nice for unwinding
	 */
	while (count--) {
		int irq = irq_base + count;
		struct irq_data *irq_data = irq_get_irq_data(irq);
		irq_hw_number_t hwirq;

		/*
		 * BUG FIX: the original read irq_data->hwirq before this
		 * NULL check, dereferencing a NULL pointer whenever the
		 * descriptor is missing. Validate first, then read.
		 */
		if (WARN_ON(!irq_data || irq_data->domain != domain))
			continue;

		hwirq = irq_data->hwirq;
		irq_set_status_flags(irq, IRQ_NOREQUEST);

		/* remove chip and handler */
		irq_set_chip_and_handler(irq, NULL, NULL);

		/* Make sure it's completed */
		synchronize_irq(irq);

		/* Tell the PIC about it */
		if (domain->ops->unmap)
			domain->ops->unmap(domain, irq);
		smp_mb();

		irq_data->domain = NULL;
		irq_data->hwirq = 0;

		/* Clear reverse map */
		switch(domain->revmap_type) {
		case IRQ_DOMAIN_MAP_LINEAR:
			if (hwirq < domain->revmap_data.linear.size)
				domain->revmap_data.linear.revmap[hwirq] = 0;
			break;
		case IRQ_DOMAIN_MAP_TREE:
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_data.tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
			break;
		}
	}
}

436 437
int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			      irq_hw_number_t hwirq_base, int count)
{
	unsigned int virq = irq_base;
	irq_hw_number_t hwirq = hwirq_base;
	int i, ret;

	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);

	/* First pass: every descriptor must exist and be unclaimed */
	for (i = 0; i < count; i++) {
		struct irq_data *irq_data = irq_get_irq_data(virq + i);

		if (WARN(!irq_data, "error: irq_desc not allocated; "
			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
		if (WARN(irq_data->domain, "error: irq_desc already associated; "
			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
	}

	/* Second pass: wire each irq up to the domain */
	for (i = 0; i < count; i++, virq++, hwirq++) {
		struct irq_data *irq_data = irq_get_irq_data(virq);

		irq_data->hwirq = hwirq;
		irq_data->domain = domain;
		if (domain->ops->map) {
			ret = domain->ops->map(domain, virq, hwirq);
			if (ret != 0) {
				pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
				       virq, hwirq, ret);
				WARN_ON(1);
				irq_data->domain = NULL;
				irq_data->hwirq = 0;
				goto err_unmap;
			}
		}

		/* Record the association in the fast reverse map */
		switch (domain->revmap_type) {
		case IRQ_DOMAIN_MAP_LINEAR:
			if (hwirq < domain->revmap_data.linear.size)
				domain->revmap_data.linear.revmap[hwirq] = virq;
			break;
		case IRQ_DOMAIN_MAP_TREE:
			mutex_lock(&revmap_trees_mutex);
			radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
			mutex_unlock(&revmap_trees_mutex);
			break;
		}

		irq_clear_status_flags(virq, IRQ_NOREQUEST);
	}

	return 0;

 err_unmap:
	/* Unwind the i associations already made */
	irq_domain_disassociate_many(domain, irq_base, i);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);
496 497 498

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
499
 * @domain: domain to allocate the irq for or NULL for default domain
500 501 502 503 504
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number.
 */
505
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
506 507 508
{
	unsigned int virq;

509 510
	if (domain == NULL)
		domain = irq_default_domain;
511

512 513
	if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
		return 0;
514

P
Paul Mundt 已提交
515
	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
516
	if (!virq) {
517
		pr_debug("create_direct virq allocation failed\n");
518
		return 0;
519
	}
520
	if (virq >= domain->revmap_data.nomap.max_irq) {
521
		pr_err("ERROR: no free irqs available below %i maximum\n",
522
			domain->revmap_data.nomap.max_irq);
523 524 525
		irq_free_desc(virq);
		return 0;
	}
526
	pr_debug("create_direct obtained virq %d\n", virq);
527

528
	if (irq_domain_associate(domain, virq, virq)) {
529
		irq_free_desc(virq);
530
		return 0;
531 532 533 534
	}

	return virq;
}
535
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
536 537 538

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
539 540
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
541 542 543 544 545 546
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
547
unsigned int irq_create_mapping(struct irq_domain *domain,
548 549
				irq_hw_number_t hwirq)
{
550 551
	unsigned int hint;
	int virq;
552

553
	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
554

555 556 557 558
	/* Look for default domain if nececssary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
559 560
		pr_warning("irq_create_mapping called for"
			   " NULL domain, hwirq=%lx\n", hwirq);
561
		WARN_ON(1);
562
		return 0;
563
	}
564
	pr_debug("-> using domain @%p\n", domain);
565 566

	/* Check if mapping already exists */
567
	virq = irq_find_mapping(domain, hwirq);
568
	if (virq) {
569
		pr_debug("-> existing mapping on virq %d\n", virq);
570 571 572 573
		return virq;
	}

	/* Get a virtual interrupt number */
574 575 576 577
	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
		return irq_domain_legacy_revmap(domain, hwirq);

	/* Allocate a virtual interrupt number */
578
	hint = hwirq % nr_irqs;
579 580
	if (hint == 0)
		hint++;
P
Paul Mundt 已提交
581
	virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
582
	if (virq <= 0)
P
Paul Mundt 已提交
583
		virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
584
	if (virq <= 0) {
585
		pr_debug("-> virq allocation failed\n");
586
		return 0;
587 588
	}

589
	if (irq_domain_associate(domain, virq, hwirq)) {
590
		irq_free_desc(virq);
591
		return 0;
592 593
	}

594
	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
595
		hwirq, of_node_full_name(domain->of_node), virq);
596 597 598 599 600

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638
/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert in to the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int ret;

	/* Reserve the exact descriptor range; fails if any irq is taken */
	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(domain->of_node));
	if (unlikely(ret < 0))
		return ret;

	/* Roll back the descriptor allocation if association fails */
	ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	if (unlikely(ret < 0)) {
		irq_free_descs(irq_base, count);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

639 640 641
unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
642
	struct irq_domain *domain;
643 644 645 646
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

647 648
	domain = controller ? irq_find_host(controller) : irq_default_domain;
	if (!domain) {
649 650 651 652 653 654 655 656 657 658 659 660
#ifdef CONFIG_MIPS
		/*
		 * Workaround to avoid breaking interrupt controller drivers
		 * that don't yet register an irq_domain.  This is temporary
		 * code. ~~~gcl, Feb 24, 2012
		 *
		 * Scheduled for removal in Linux v3.6.  That should be enough
		 * time.
		 */
		if (intsize > 0)
			return intspec[0];
#endif
661
		pr_warning("no irq domain found for %s !\n",
662
			   of_node_full_name(controller));
663
		return 0;
664 665
	}

666 667
	/* If domain has no translation, then we assume interrupt line */
	if (domain->ops->xlate == NULL)
668 669
		hwirq = intspec[0];
	else {
670
		if (domain->ops->xlate(domain, controller, intspec, intsize,
671
				     &hwirq, &type))
672
			return 0;
673 674 675
	}

	/* Create mapping */
676
	virq = irq_create_mapping(domain, hwirq);
677
	if (!virq)
678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
695
	struct irq_domain *domain;
696

697
	if (!virq || !irq_data)
698 699
		return;

700 701
	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
702 703 704
		return;

	/* Never unmap legacy interrupts */
705
	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
706 707
		return;

708
	irq_domain_disassociate_many(domain, virq, 1);
709 710 711 712 713 714
	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
715 716
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
717
 */
718
unsigned int irq_find_mapping(struct irq_domain *domain,
719 720
			      irq_hw_number_t hwirq)
{
721
	struct irq_data *data;
722

723 724 725 726
	/* Look for default domain if nececssary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
727
		return 0;
728

729 730
	switch (domain->revmap_type) {
	case IRQ_DOMAIN_MAP_LEGACY:
731
		return irq_domain_legacy_revmap(domain, hwirq);
732 733 734 735 736 737 738 739 740 741 742
	case IRQ_DOMAIN_MAP_LINEAR:
		return irq_linear_revmap(domain, hwirq);
	case IRQ_DOMAIN_MAP_TREE:
		rcu_read_lock();
		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
		rcu_read_unlock();
		if (data)
			return data->irq;
		break;
	case IRQ_DOMAIN_MAP_NOMAP:
		data = irq_get_irq_data(hwirq);
743
		if (data && (data->domain == domain) && (data->hwirq == hwirq))
744 745 746 747
			return hwirq;
		break;
	}

748
	return 0;
749 750 751 752 753
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

/**
 * irq_linear_revmap() - Find a linux irq from a hw irq number.
754 755
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
756
 *
757 758
 * This is a fast path that can be called directly by irq controller code to
 * save a handful of instructions.
759
 */
760
unsigned int irq_linear_revmap(struct irq_domain *domain,
761 762
			       irq_hw_number_t hwirq)
{
763
	BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
764

765 766 767
	/* Check revmap bounds; complain if exceeded */
	if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
		return 0;
768

769
	return domain->revmap_data.linear.revmap[hwirq];
770
}
771
EXPORT_SYMBOL_GPL(irq_linear_revmap);
772

773
#ifdef CONFIG_IRQ_DOMAIN_DEBUG
774 775 776 777 778 779 780 781 782
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	static const char none[] = "none";
	void *data;
	int i;

G
Grant Likely 已提交
783
	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %s\n", "irq", "hwirq",
784 785
		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		      "domain name");
786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05lx  ", desc->irq_data.hwirq);

			chip = irq_desc_get_chip(desc);
			if (chip && chip->name)
				p = chip->name;
			else
				p = none;
			seq_printf(m, "%-15s  ", p);

			data = irq_desc_get_chip_data(desc);
G
Grant Likely 已提交
808
			seq_printf(m, data ? "0x%p  " : "  %p  ", data);
809

810 811
			if (desc->irq_data.domain)
				p = of_node_full_name(desc->irq_data.domain->of_node);
812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

/* seq_file open hook: renders the full table via virq_debug_show(). */
static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

/* File operations for the irq_domain_mapping debugfs entry. */
static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Expose the mapping table at <debugfs>/irq_domain_mapping. */
static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
844
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
845

846 847 848 849 850 851 852 853 854
/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
855
{
856
	if (WARN_ON(intsize < 1))
857 858 859 860 861
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			const u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	/* Second cell carries the trigger type; keep only the sense bits */
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings.  For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	/* Optional second cell carries the trigger type */
	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
906

907
const struct irq_domain_ops irq_domain_simple_ops = {
908
	.xlate = irq_domain_xlate_onetwocell,
909 910 911 912
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

#ifdef CONFIG_OF_IRQ
913 914 915 916
/* Register a 32-irq legacy domain for the controller node matching
 * @match at @phys_base, starting at linux irq @irq_start. */
void irq_domain_generate_simple(const struct of_device_id *match,
				u64 phys_base, unsigned int irq_start)
{
	struct device_node *node;

	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
		(unsigned long long) phys_base, (int) irq_start);
	node = of_find_matching_node_by_address(NULL, match, phys_base);
	if (node)
		irq_domain_add_legacy(node, 32, irq_start, 0,
				      &irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
925
#endif