#define pr_fmt(fmt)  "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

/**
 * irq_domain_alloc() - Allocate a new irq_domain data structure
 * @of_node: optional device-tree node of the interrupt controller
 * @size: size of the linear reverse map (number of hwirq slots)
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.  The caller is expected
 * to register the allocated irq_domain with irq_domain_add().  Returns a
 * pointer to the IRQ domain, or NULL on failure.
 */
static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
					   int size,
					   const struct irq_domain_ops *ops,
					   void *host_data)
{
	struct irq_domain *domain;

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->of_node = of_node_get(of_node);
	domain->revmap_size = size;

	return domain;
}

static void irq_domain_free(struct irq_domain *domain)
{
	of_node_put(domain->of_node);
	kfree(domain);
}

static void irq_domain_add(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);
	pr_debug("Added domain %s\n", domain->name);
}

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * calling this function.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	/*
	 * radix_tree_delete() takes care of destroying the root
	 * node when all entries are removed. Shout if there are
	 * any mappings left.
	 */
	WARN_ON(domain->revmap_tree.height);

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	irq_domain_free(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);

/**
 * irq_domain_add_simple() - Allocate and register a simple irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. This will result in a
 *	linear IRQ domain so it is important to use irq_create_mapping()
 *	for each used IRQ, especially when SPARSE_IRQ is enabled.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates a legacy irq_domain if first_irq is positive, or a linear
 * domain otherwise. For the legacy domain, IRQ descriptors will also
 * be allocated.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers which is that a linear mapping should
 * normally be used unless the system requires a legacy mapping in
 * order to support supplying interrupt numbers during non-DT
 * registration of devices.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	if (first_irq > 0) {
		int irq_base;

		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/*
			 * Set the descriptor allocator to search for a
			 * 1-to-1 mapping, such as irq_alloc_desc_at().
			 * Use of_node_to_nid() which is defined to
			 * numa_node_id() on platforms that have no custom
			 * implementation.
			 */
			irq_base = irq_alloc_descs(first_irq, first_irq, size,
						   of_node_to_nid(of_node));
			if (irq_base < 0) {
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
				irq_base = first_irq;
			}
		} else
			irq_base = first_irq;

		return irq_domain_add_legacy(of_node, size, irq_base, 0,
					     ops, host_data);
	}

	/* A linear domain is the default */
	return irq_domain_add_linear(of_node, size, ops, host_data);
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
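/*
 * Usage sketch (illustrative only; every "foo" name is hypothetical): a
 * controller driver with 32 interrupt lines that is happy with on-the-fly
 * linux irq allocation could register itself like this, assuming
 * foo_irq_domain_ops provides at least a .map callback:
 *
 *	domain = irq_domain_add_simple(node, 32, 0,
 *				       &foo_irq_domain_ops, foo_priv);
 *	if (!domain)
 *		return -ENOMEM;
 *
 * Passing a non-zero first_irq instead pre-allocates descriptors at fixed
 * linux irq numbers and falls back to the legacy behaviour described above.
 */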

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	pr_debug("Setting up legacy domain virq[%i:%i] ==> hwirq[%i:%i]\n",
		 first_irq, first_irq + size - 1,
		 (int)first_hwirq, (int)first_hwirq + size - 1);

	domain = irq_domain_add_linear(of_node, first_hwirq + size, ops, host_data);
	if (!domain)
		return NULL;

	WARN_ON(irq_domain_associate_many(domain, first_irq, first_hwirq, size));

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
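/*
 * Usage sketch (illustrative only; FOO_IRQ_BASE and foo_* are hypothetical):
 * a board file or driver that must keep a pre-existing block of 16 linux irq
 * numbers starting at FOO_IRQ_BASE, with hwirqs starting at 0, might do:
 *
 *	domain = irq_domain_add_legacy(node, 16, FOO_IRQ_BASE, 0,
 *				       &foo_irq_domain_ops, foo_priv);
 *
 * The map() callback of foo_irq_domain_ops is invoked before this call
 * returns for each interrupt in the range (except hwirq 0), as noted above.
 */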

/**
 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: Number of interrupts in the domain.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 */
struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
					 unsigned int size,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = irq_domain_alloc(of_node, size, ops, host_data);
	if (!domain)
		return NULL;

	irq_domain_add(domain);
	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_linear);
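/*
 * Usage sketch (illustrative only; all "foo" names are hypothetical): a
 * typical driver supplies an irq_domain_ops with a .map callback that sets
 * up the chip and handler for each virq, plus one of the generic xlate
 * helpers found at the bottom of this file:
 *
 *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
 *		irq_set_chip_data(virq, d->host_data);
 *		return 0;
 *	}
 *
 *	static const struct irq_domain_ops foo_irq_domain_ops = {
 *		.map	= foo_irq_map,
 *		.xlate	= irq_domain_xlate_onecell,
 *	};
 *
 *	domain = irq_domain_add_linear(node, 32, &foo_irq_domain_ops, foo_priv);
 */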

struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
					 unsigned int max_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain = irq_domain_alloc(of_node, 0, ops, host_data);
	if (domain) {
		domain->revmap_direct_max_irq = max_irq ? max_irq : ~0;
		irq_domain_add(domain);
	}
	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_nomap);

/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
	struct irq_domain *h, *found = NULL;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * though...
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
			rc = h->ops->match(h, node);
		else
			rc = (h->of_node != NULL) && (h->of_node == node);

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

static void irq_domain_disassociate_many(struct irq_domain *domain,
					 unsigned int irq_base, int count)
{
	/*
	 * disassociate in reverse order;
	 * not strictly necessary, but nice for unwinding
	 */
	while (count--) {
		int irq = irq_base + count;
		struct irq_data *irq_data = irq_get_irq_data(irq);
		irq_hw_number_t hwirq;

		if (WARN_ON(!irq_data || irq_data->domain != domain))
			continue;

		hwirq = irq_data->hwirq;
		irq_set_status_flags(irq, IRQ_NOREQUEST);

		/* remove chip and handler */
		irq_set_chip_and_handler(irq, NULL, NULL);

		/* Make sure it's completed */
		synchronize_irq(irq);

		/* Tell the PIC about it */
		if (domain->ops->unmap)
			domain->ops->unmap(domain, irq);
		smp_mb();

		irq_data->domain = NULL;
		irq_data->hwirq = 0;

		/* Clear reverse map for this hwirq */
		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = 0;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
		}
	}
}

int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			      irq_hw_number_t hwirq_base, int count)
{
	unsigned int virq = irq_base;
	irq_hw_number_t hwirq = hwirq_base;
	int i, ret;

	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		struct irq_data *irq_data = irq_get_irq_data(virq + i);

		if (WARN(!irq_data, "error: irq_desc not allocated; "
			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
		if (WARN(irq_data->domain, "error: irq_desc already associated; "
			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
	}

	for (i = 0; i < count; i++, virq++, hwirq++) {
		struct irq_data *irq_data = irq_get_irq_data(virq);

		irq_data->hwirq = hwirq;
		irq_data->domain = domain;
		if (domain->ops->map) {
			ret = domain->ops->map(domain, virq, hwirq);
			if (ret != 0) {
				/*
				 * If map() returns -EPERM, this interrupt is protected
				 * by the firmware or some other service and shall not
				 * be mapped. Don't bother telling the user about it.
				 */
				if (ret != -EPERM) {
					pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					       domain->name, hwirq, virq, ret);
				}
				irq_data->domain = NULL;
				irq_data->hwirq = 0;
				continue;
			}
			/* If not already assigned, give the domain the chip's name */
			if (!domain->name && irq_data->chip)
				domain->name = irq_data->chip->name;
		}

		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = virq;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
			mutex_unlock(&revmap_trees_mutex);
		}

		irq_clear_status_flags(virq, IRQ_NOREQUEST);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
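/*
 * Usage sketch (illustrative only; "foo" names are hypothetical): a
 * controller that lets software pick its hwirq numbers would pair a nomap
 * domain with direct mappings:
 *
 *	domain = irq_domain_add_nomap(node, 0, &foo_irq_domain_ops, foo_priv);
 *	virq = irq_create_direct_mapping(domain);
 *	if (!virq)
 *		return -EBUSY;
 *
 * after which the hardware is programmed so that this source raises
 * hwirq == virq.
 */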

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, irq_set_irq_type() should be
 * called on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	unsigned int hint;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		pr_warning("irq_create_mapping called for NULL domain, hwirq=%lx\n",
			   hwirq);
		WARN_ON(1);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	hint = hwirq % nr_irqs;
	if (hint == 0)
		hint++;
	virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
	if (virq <= 0)
		virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(domain->of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
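/*
 * Usage sketch (illustrative only; foo_handler, foo_dev and the hwirq value
 * 5 are hypothetical): a driver that knows the hwirq number of one of its
 * sources can turn it into a linux irq and request it as usual:
 *
 *	virq = irq_create_mapping(domain, 5);
 *	if (!virq)
 *		return -EINVAL;
 *	ret = request_irq(virq, foo_handler, 0, "foo", foo_dev);
 */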

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert in to the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int ret;

	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(domain->of_node));
	if (unlikely(ret < 0))
		return ret;

	ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	if (unlikely(ret < 0)) {
		irq_free_descs(irq_base, count);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
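/*
 * Usage sketch (illustrative only; FOO_IRQ_BASE is hypothetical): a
 * controller with 8 sources whose hwirqs 0..7 must land on a fixed block of
 * linux irqs could do:
 *
 *	ret = irq_create_strict_mappings(domain, FOO_IRQ_BASE, 0, 8);
 *	if (ret)
 *		return ret;
 */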

unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_domain *domain;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	domain = controller ? irq_find_host(controller) : irq_default_domain;
	if (!domain) {
#ifdef CONFIG_MIPS
		/*
		 * Workaround to avoid breaking interrupt controller drivers
		 * that don't yet register an irq_domain.  This is temporary
		 * code. ~~~gcl, Feb 24, 2012
		 *
		 * Scheduled for removal in Linux v3.6.  That should be enough
		 * time.
		 */
		if (intsize > 0)
			return intspec[0];
#endif
		pr_warning("no irq domain found for %s !\n",
			   of_node_full_name(controller));
		return 0;
	}

	/* If domain has no translation, then we assume interrupt line */
	if (domain->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (domain->ops->xlate(domain, controller, intspec, intsize,
				     &hwirq, &type))
			return 0;
	}

	/* Create mapping */
	virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	irq_domain_disassociate_many(domain, virq, 1);
	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_get_irq_data(hwirq);
		if (data && (data->domain == domain) && (data->hwirq == hwirq))
			return hwirq;
	}

	return irq_linear_revmap(domain, hwirq);
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

/**
 * irq_linear_revmap() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a fast path that can be called directly by irq controller code to
 * save a handful of instructions.
 */
unsigned int irq_linear_revmap(struct irq_domain *domain,
			       irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Check revmap bounds; fall back to the radix tree if exceeded */
	if (hwirq >= domain->revmap_size) {
		rcu_read_lock();
		data = radix_tree_lookup(&domain->revmap_tree, hwirq);
		rcu_read_unlock();
		return data ? data->irq : 0;
	}

	return domain->linear_revmap[hwirq];
}
EXPORT_SYMBOL_GPL(irq_linear_revmap);
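/*
 * Usage sketch (illustrative only; every "foo" name, register offset and the
 * two-argument chained-handler prototype are assumptions): a cascaded
 * interrupt controller typically decodes its status register in a chained
 * handler and uses the reverse map to dispatch:
 *
 *	static void foo_irq_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		u32 pending = readl(foo_base + FOO_IRQ_STATUS);
 *
 *		while (pending) {
 *			irq_hw_number_t hwirq = __ffs(pending);
 *
 *			generic_handle_irq(irq_linear_revmap(foo_domain, hwirq));
 *			pending &= ~BIT(hwirq);
 *		}
 *	}
 */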

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	void *data;
	int i;

	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %s\n", "irq", "hwirq",
		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		      "domain name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05lx  ", desc->irq_data.hwirq);

			chip = irq_desc_get_chip(desc);
			seq_printf(m, "%-15s  ", (chip && chip->name) ? chip->name : "none");

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p  " : "  %p  ", data);

			seq_printf(m, "%s\n", desc->irq_data.domain->name);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			const u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings.  For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
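/*
 * Usage sketch (illustrative only): a controller whose device-tree binding
 * uses two interrupt cells (hwirq number, then trigger flags) can plug the
 * generic helper straight into its ops, e.g. in the hypothetical
 * foo_irq_domain_ops shown earlier:
 *
 *	.xlate	= irq_domain_xlate_twocell,
 *
 * For a specifier of <5 IRQ_TYPE_EDGE_RISING>, the helper hands back hwirq 5
 * and the trigger type, which irq_create_of_mapping() above then applies via
 * irq_set_irq_type().
 */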

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

#ifdef CONFIG_OF_IRQ
void irq_domain_generate_simple(const struct of_device_id *match,
				u64 phys_base, unsigned int irq_start)
{
	struct device_node *node;
	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
		(unsigned long long) phys_base, (int) irq_start);
	node = of_find_matching_node_by_address(NULL, match, phys_base);
	if (node)
		irq_domain_add_legacy(node, 32, irq_start, 0,
				      &irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
#endif