#define pr_fmt(fmt)  "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

/* List of all registered irq_domains, protected by irq_domain_mutex. */
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

/* Serializes updates to the per-domain radix-tree reverse maps. */
static DEFINE_MUTEX(revmap_trees_mutex);
/* Domain used when callers pass a NULL domain; see irq_set_default_host(). */
static struct irq_domain *irq_default_domain;

/**
26
 * irq_domain_alloc() - Allocate a new irq_domain data structure
27 28
 * @of_node: optional device-tree node of the interrupt controller
 * @revmap_type: type of reverse mapping to use
29
 * @ops: map/unmap domain callbacks
30
 * @host_data: Controller private data pointer
31
 *
32 33 34
 * Allocates and initialize and irq_domain structure.  Caller is expected to
 * register allocated irq_domain with irq_domain_register().  Returns pointer
 * to IRQ domain, or NULL on failure.
35
 */
36
static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
37
					   unsigned int revmap_type, int size,
38
					   const struct irq_domain_ops *ops,
39
					   void *host_data)
40
{
41
	struct irq_domain *domain;
42

43 44
	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
45
	if (WARN_ON(!domain))
46 47 48
		return NULL;

	/* Fill structure */
49
	INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
50 51
	domain->revmap_type = revmap_type;
	domain->ops = ops;
52
	domain->host_data = host_data;
53
	domain->of_node = of_node_get(of_node);
54
	domain->revmap_data.linear.size = size;
55

56 57 58
	return domain;
}

59 60 61 62 63 64
static void irq_domain_free(struct irq_domain *domain)
{
	of_node_put(domain->of_node);
	kfree(domain);
}

65 66 67 68 69
static void irq_domain_add(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);
70
	pr_debug("Allocated domain of type %d @0x%p\n",
71 72 73
		 domain->revmap_type, domain);
}

74 75 76 77 78 79 80 81 82 83 84 85
/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

86 87 88 89 90 91
	/*
	 * radix_tree_delete() takes care of destroying the root
	 * node when all entries are removed. Shout if there are
	 * any mappings left.
	 */
	WARN_ON(domain->revmap_data.tree.height);
92 93 94 95 96 97 98 99 100 101 102

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

103
	pr_debug("Removed domain of type %d @0x%p\n",
104 105 106 107
		 domain->revmap_type, domain);

	irq_domain_free(domain);
}
108
EXPORT_SYMBOL_GPL(irq_domain_remove);
109

110 111 112 113
/**
 * irq_domain_add_simple() - Allocate and register a simple irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
114 115 116 117
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. This will result in a
 *	linear IRQ domain so it is important to use irq_create_mapping()
 *	for each used IRQ, especially when SPARSE_IRQ is enabled.
118 119 120 121
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates a legacy irq_domain if irq_base is positive or a linear
122 123
 * domain otherwise. For the legacy domain, IRQ descriptors will also
 * be allocated.
124 125 126 127 128 129 130 131 132 133 134 135 136
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers which is that a linear mapping should
 * normally be used unless the system requires a legacy mapping in
 * order to support supplying interrupt numbers during non-DT
 * registration of devices.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
137 138 139 140 141 142 143 144 145 146 147 148 149 150
	if (first_irq > 0) {
		int irq_base;

		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/*
			 * Set the descriptor allocator to search for a
			 * 1-to-1 mapping, such as irq_alloc_desc_at().
			 * Use of_node_to_nid() which is defined to
			 * numa_node_id() on platforms that have no custom
			 * implementation.
			 */
			irq_base = irq_alloc_descs(first_irq, first_irq, size,
						   of_node_to_nid(of_node));
			if (irq_base < 0) {
151 152
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
153 154 155 156 157 158
				irq_base = first_irq;
			}
		} else
			irq_base = first_irq;

		return irq_domain_add_legacy(of_node, size, irq_base, 0,
159
					     ops, host_data);
160 161 162 163
	}

	/* A linear domain is the default */
	return irq_domain_add_linear(of_node, size, ops, host_data);
164
}
165
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
166

167 168 169
/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
170 171 172 173 174
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
175 176 177 178 179 180 181 182
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
183 184 185
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
186
					 const struct irq_domain_ops *ops,
187 188
					 void *host_data)
{
189
	struct irq_domain *domain;
190

191 192 193 194 195
	pr_debug("Setting up legacy domain virq[%i:%i] ==> hwirq[%i:%i]\n",
		 first_irq, first_irq + size - 1,
		 (int)first_hwirq, (int)first_hwirq + size -1);

	domain = irq_domain_add_linear(of_node, first_hwirq + size, ops, host_data);
196 197 198
	if (!domain)
		return NULL;

199
	WARN_ON(irq_domain_associate_many(domain, first_irq, first_hwirq, size));
200

201 202
	return domain;
}
203
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
204 205

/**
206
 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
207
 * @of_node: pointer to interrupt controller's device tree node.
208
 * @size: Number of interrupts in the domain.
209 210 211 212 213
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 */
struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
					 unsigned int size,
214
					 const struct irq_domain_ops *ops,
215 216 217
					 void *host_data)
{
	struct irq_domain *domain;
218

219 220
	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, size, ops, host_data);
	if (!domain)
221
		return NULL;
222

223 224 225
	irq_domain_add(domain);
	return domain;
}
226
EXPORT_SYMBOL_GPL(irq_domain_add_linear);
227 228

struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
229
					 unsigned int max_irq,
230
					 const struct irq_domain_ops *ops,
231 232 233
					 void *host_data)
{
	struct irq_domain *domain = irq_domain_alloc(of_node,
234
					IRQ_DOMAIN_MAP_NOMAP, 0, ops, host_data);
235 236
	if (domain) {
		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
237
		irq_domain_add(domain);
238
	}
239 240
	return domain;
}
241
EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
242

243 244 245 246 247 248 249
/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
	struct irq_domain *h, *found = NULL;
250
	int rc;
251 252 253 254 255 256 257

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	mutex_lock(&irq_domain_mutex);
258 259 260 261 262 263 264
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
			rc = h->ops->match(h, node);
		else
			rc = (h->of_node != NULL) && (h->of_node == node);

		if (rc) {
265 266 267
			found = h;
			break;
		}
268
	}
269 270 271 272 273 274 275
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

/**
 * irq_set_default_host() - Set a "default" irq domain
276
 * @domain: default domain pointer
277 278 279 280 281 282
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
283
void irq_set_default_host(struct irq_domain *domain)
284
{
285
	pr_debug("Default domain set to @0x%p\n", domain);
286

287
	irq_default_domain = domain;
288
}
289
EXPORT_SYMBOL_GPL(irq_set_default_host);
290

291 292 293 294 295 296 297 298 299 300
static void irq_domain_disassociate_many(struct irq_domain *domain,
					 unsigned int irq_base, int count)
{
	/*
	 * disassociate in reverse order;
	 * not strictly necessary, but nice for unwinding
	 */
	while (count--) {
		int irq = irq_base + count;
		struct irq_data *irq_data = irq_get_irq_data(irq);
301
		irq_hw_number_t hwirq;
302 303 304 305

		if (WARN_ON(!irq_data || irq_data->domain != domain))
			continue;

306
		hwirq = irq_data->hwirq;
307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322
		irq_set_status_flags(irq, IRQ_NOREQUEST);

		/* remove chip and handler */
		irq_set_chip_and_handler(irq, NULL, NULL);

		/* Make sure it's completed */
		synchronize_irq(irq);

		/* Tell the PIC about it */
		if (domain->ops->unmap)
			domain->ops->unmap(domain, irq);
		smp_mb();

		irq_data->domain = NULL;
		irq_data->hwirq = 0;

323 324 325 326
		/* Clear reverse map for this hwirq */
		if (hwirq < domain->revmap_data.linear.size) {
			domain->linear_revmap[hwirq] = 0;
		} else {
327 328 329 330 331 332 333
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_data.tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
		}
	}
}

334 335
int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			      irq_hw_number_t hwirq_base, int count)
336
{
337 338
	unsigned int virq = irq_base;
	irq_hw_number_t hwirq = hwirq_base;
339
	int i, ret;
340

341 342
	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
343

344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359
	for (i = 0; i < count; i++) {
		struct irq_data *irq_data = irq_get_irq_data(virq + i);

		if (WARN(!irq_data, "error: irq_desc not allocated; "
			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
		if (WARN(irq_data->domain, "error: irq_desc already associated; "
			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
	};

	for (i = 0; i < count; i++, virq++, hwirq++) {
		struct irq_data *irq_data = irq_get_irq_data(virq);

		irq_data->hwirq = hwirq;
		irq_data->domain = domain;
360 361 362
		if (domain->ops->map) {
			ret = domain->ops->map(domain, virq, hwirq);
			if (ret != 0) {
363 364 365
				/*
				 * If map() returns -EPERM, this interrupt is protected
				 * by the firmware or some other service and shall not
366
				 * be mapped. Don't bother telling the user about it.
367 368
				 */
				if (ret != -EPERM) {
369
					pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
G
Grant Likely 已提交
370
					       domain->name, hwirq, virq, ret);
371
				}
372 373
				irq_data->domain = NULL;
				irq_data->hwirq = 0;
374
				continue;
375
			}
G
Grant Likely 已提交
376 377 378
			/* If not already assigned, give the domain the chip's name */
			if (!domain->name && irq_data->chip)
				domain->name = irq_data->chip->name;
379 380
		}

381 382 383
		if (hwirq < domain->revmap_data.linear.size) {
			domain->linear_revmap[hwirq] = virq;
		} else {
384
			mutex_lock(&revmap_trees_mutex);
385
			radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
386 387
			mutex_unlock(&revmap_trees_mutex);
		}
388

389 390
		irq_clear_status_flags(virq, IRQ_NOREQUEST);
	}
391 392 393

	return 0;
}
394
EXPORT_SYMBOL_GPL(irq_domain_associate_many);
395 396 397

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
398
 * @domain: domain to allocate the irq for or NULL for default domain
399 400 401 402 403
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number.
 */
404
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
405 406 407
{
	unsigned int virq;

408 409
	if (domain == NULL)
		domain = irq_default_domain;
410

411 412
	if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
		return 0;
413

P
Paul Mundt 已提交
414
	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
415
	if (!virq) {
416
		pr_debug("create_direct virq allocation failed\n");
417
		return 0;
418
	}
419
	if (virq >= domain->revmap_data.nomap.max_irq) {
420
		pr_err("ERROR: no free irqs available below %i maximum\n",
421
			domain->revmap_data.nomap.max_irq);
422 423 424
		irq_free_desc(virq);
		return 0;
	}
425
	pr_debug("create_direct obtained virq %d\n", virq);
426

427
	if (irq_domain_associate(domain, virq, virq)) {
428
		irq_free_desc(virq);
429
		return 0;
430 431 432 433
	}

	return virq;
}
434
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
435 436 437

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
438 439
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
440 441 442 443 444 445
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
446
unsigned int irq_create_mapping(struct irq_domain *domain,
447 448
				irq_hw_number_t hwirq)
{
449 450
	unsigned int hint;
	int virq;
451

452
	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
453

454 455 456 457
	/* Look for default domain if nececssary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
458 459
		pr_warning("irq_create_mapping called for"
			   " NULL domain, hwirq=%lx\n", hwirq);
460
		WARN_ON(1);
461
		return 0;
462
	}
463
	pr_debug("-> using domain @%p\n", domain);
464 465

	/* Check if mapping already exists */
466
	virq = irq_find_mapping(domain, hwirq);
467
	if (virq) {
468
		pr_debug("-> existing mapping on virq %d\n", virq);
469 470 471
		return virq;
	}

472
	/* Allocate a virtual interrupt number */
473
	hint = hwirq % nr_irqs;
474 475
	if (hint == 0)
		hint++;
P
Paul Mundt 已提交
476
	virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
477
	if (virq <= 0)
P
Paul Mundt 已提交
478
		virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
479
	if (virq <= 0) {
480
		pr_debug("-> virq allocation failed\n");
481
		return 0;
482 483
	}

484
	if (irq_domain_associate(domain, virq, hwirq)) {
485
		irq_free_desc(virq);
486
		return 0;
487 488
	}

489
	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
490
		hwirq, of_node_full_name(domain->of_node), virq);
491 492 493 494 495

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533
/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert in to the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int ret;

	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(domain->of_node));
	if (unlikely(ret < 0))
		return ret;

	ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	if (unlikely(ret < 0)) {
		irq_free_descs(irq_base, count);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);

534 535 536
unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
537
	struct irq_domain *domain;
538 539 540 541
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

542 543
	domain = controller ? irq_find_host(controller) : irq_default_domain;
	if (!domain) {
544 545 546 547 548 549 550 551 552 553 554 555
#ifdef CONFIG_MIPS
		/*
		 * Workaround to avoid breaking interrupt controller drivers
		 * that don't yet register an irq_domain.  This is temporary
		 * code. ~~~gcl, Feb 24, 2012
		 *
		 * Scheduled for removal in Linux v3.6.  That should be enough
		 * time.
		 */
		if (intsize > 0)
			return intspec[0];
#endif
556
		pr_warning("no irq domain found for %s !\n",
557
			   of_node_full_name(controller));
558
		return 0;
559 560
	}

561 562
	/* If domain has no translation, then we assume interrupt line */
	if (domain->ops->xlate == NULL)
563 564
		hwirq = intspec[0];
	else {
565
		if (domain->ops->xlate(domain, controller, intspec, intsize,
566
				     &hwirq, &type))
567
			return 0;
568 569 570
	}

	/* Create mapping */
571
	virq = irq_create_mapping(domain, hwirq);
572
	if (!virq)
573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
590
	struct irq_domain *domain;
591

592
	if (!virq || !irq_data)
593 594
		return;

595 596
	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
597 598
		return;

599
	irq_domain_disassociate_many(domain, virq, 1);
600 601 602 603 604 605
	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
606 607
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
608
 */
609
unsigned int irq_find_mapping(struct irq_domain *domain,
610 611
			      irq_hw_number_t hwirq)
{
612
	struct irq_data *data;
613

614 615 616 617
	/* Look for default domain if nececssary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
618
		return 0;
619

620 621 622 623 624
	switch (domain->revmap_type) {
	case IRQ_DOMAIN_MAP_LINEAR:
		return irq_linear_revmap(domain, hwirq);
	case IRQ_DOMAIN_MAP_NOMAP:
		data = irq_get_irq_data(hwirq);
625
		if (data && (data->domain == domain) && (data->hwirq == hwirq))
626 627 628 629
			return hwirq;
		break;
	}

630
	return 0;
631 632 633 634 635
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

/**
 * irq_linear_revmap() - Find a linux irq from a hw irq number.
636 637
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
638
 *
639 640
 * This is a fast path that can be called directly by irq controller code to
 * save a handful of instructions.
641
 */
642
unsigned int irq_linear_revmap(struct irq_domain *domain,
643 644
			       irq_hw_number_t hwirq)
{
645
	struct irq_data *data;
646
	BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
647

648
	/* Check revmap bounds; complain if exceeded */
649 650 651 652 653 654
	if (hwirq >= domain->revmap_data.linear.size) {
		rcu_read_lock();
		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
		rcu_read_unlock();
		return data ? data->irq : 0;
	}
655

656
	return domain->linear_revmap[hwirq];
657
}
658
EXPORT_SYMBOL_GPL(irq_linear_revmap);
659

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
/*
 * Dump a table of all active irqs (irq, hwirq, chip name, chip data,
 * domain name) for the irq_domain_mapping debugfs file.
 */
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	void *data;
	int i;

	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %s\n", "irq", "hwirq",
		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		      "domain name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		/* Only report irqs that have a handler installed. */
		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05lx  ", desc->irq_data.hwirq);

			chip = irq_desc_get_chip(desc);
			seq_printf(m, "%-15s  ", (chip && chip->name) ? chip->name : "none");

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p  " : "  %p  ", data);

			/* NOTE(review): assumes an active irq always has a domain — confirm */
			seq_printf(m, "%s\n", desc->irq_data.domain->name);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
722

723 724 725 726 727 728 729 730 731
/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
732
{
733
	if (WARN_ON(intsize < 1))
734 735 736 737 738
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Translation helper for two-cell device tree bindings: the first cell is
 * the hwirq number and the second cell carries the linux irq trigger flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			const u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	/* Only the sense bits of the flags cell are meaningful. */
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	*out_hwirq = intspec[0];
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Translation helper accepting either one-cell (hwirq only) or two-cell
 * (hwirq + trigger flags) device tree specifiers.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings.  For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	/* The flags cell is optional; default to an unspecified trigger. */
	if (intsize > 1)
		*out_type = intspec[1];
	else
		*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
783

784
const struct irq_domain_ops irq_domain_simple_ops = {
785
	.xlate = irq_domain_xlate_onetwocell,
786 787 788 789
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

#ifdef CONFIG_OF_IRQ
/*
 * Locate the DT node matching @match at @phys_base and, if found, register a
 * 32-irq legacy domain for it starting at @irq_start using the simple ops.
 */
void irq_domain_generate_simple(const struct of_device_id *match,
				u64 phys_base, unsigned int irq_start)
{
	struct device_node *node;
	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
		(unsigned long long) phys_base, (int) irq_start);
	node = of_find_matching_node_by_address(NULL, match, phys_base);
	if (node)
		irq_domain_add_legacy(node, 32, irq_start, 0,
				      &irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
#endif