#define pr_fmt(fmt)  "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @of_node: optional device-tree node of the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure, adds it to the global
 * list of irq domains, and returns a pointer to it on success, or NULL on
 * failure.
 */
struct irq_domain *__irq_domain_add(struct device_node *of_node,
				    int size, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct irq_domain *domain;

	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->of_node = of_node_get(of_node);
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;

	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
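
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * irq_domain_ops that a driver might hand to the allocators below.  The
 * names foo_irq_chip, foo_irq_map and foo_domain_ops are hypothetical.
 *
 *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
 *		irq_set_chip_data(virq, d->host_data);
 *		return 0;
 *	}
 *
 *	static const struct irq_domain_ops foo_domain_ops = {
 *		.map	= foo_irq_map,
 *		.xlate	= irq_domain_xlate_onecell,
 *	};
 */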

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * calling this function.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	/*
	 * radix_tree_delete() takes care of destroying the root
	 * node when all entries are removed. Shout if there are
	 * any mappings left.
	 */
	WARN_ON(domain->revmap_tree.height);

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	of_node_put(domain->of_node);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);

/**
 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 *	pre-map all of the irqs in the domain to virqs starting at first_irq.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates an irq_domain, and optionally if first_irq is positive then also
 * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 * irqs get mapped dynamically on the fly. However, if the controller requires
 * static virq assignments (non-DT boot) then it will set that up correctly.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node, size, 0, ops, host_data);
	if (!domain)
		return NULL;

	if (first_irq > 0) {
		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/* attempt to allocate irq_descs */
			int rc = irq_alloc_descs(first_irq, first_irq, size,
						 of_node_to_nid(of_node));
			if (rc < 0)
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
		}
		WARN_ON(irq_domain_associate_many(domain, first_irq, 0, size));
	}

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
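
/*
 * Example (hedged sketch): typical call from a DT-probed interrupt
 * controller driver.  FOO_NR_IRQS, foo_domain_ops and priv are hypothetical;
 * passing first_irq == 0 lets virqs be allocated on demand.
 *
 *	priv->domain = irq_domain_add_simple(node, FOO_NR_IRQS, 0,
 *					     &foo_domain_ops, priv);
 *	if (!priv->domain)
 *		return -ENOMEM;
 */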

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;

	domain = __irq_domain_add(of_node, first_hwirq + size, 0, ops, host_data);
	if (!domain)
		return NULL;

	WARN_ON(irq_domain_associate_many(domain, first_irq, first_hwirq, size));

	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
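
/*
 * Example (hedged sketch): a non-DT board file that already owns a fixed
 * block of 16 virqs starting at FOO_IRQ_BASE (hypothetical constant).
 *
 *	domain = irq_domain_add_legacy(NULL, 16, FOO_IRQ_BASE, 0,
 *				       &foo_domain_ops, NULL);
 *	if (!domain)
 *		pr_err("foo: failed to register legacy irq domain\n");
 */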

/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
	struct irq_domain *h, *found = NULL;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far,
	 * though...
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
			rc = h->ops->match(h, node);
		else
			rc = (h->of_node != NULL) && (h->of_node == node);

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);
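
/*
 * Example (hedged sketch): resolving the domain behind a device's interrupt
 * parent; "np" is a hypothetical device node and of_irq_find_parent() comes
 * from <linux/of_irq.h>.
 *
 *	struct device_node *parent = of_irq_find_parent(np);
 *	struct irq_domain *domain = parent ? irq_find_host(parent) : NULL;
 *	if (!domain)
 *		return -EPROBE_DEFER;
 */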

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
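
/*
 * Example (hedged sketch): a platform's primary interrupt controller can
 * nominate itself so that irq_create_mapping(NULL, hwirq) and
 * irq_find_mapping(NULL, hwirq) resolve against it; the domain pointer
 * below is hypothetical.
 *
 *	irq_set_default_host(foo_primary_domain);
 */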

static void irq_domain_disassociate_many(struct irq_domain *domain,
					 unsigned int irq_base, int count)
{
	/*
	 * disassociate in reverse order;
	 * not strictly necessary, but nice for unwinding
	 */
	while (count--) {
		int irq = irq_base + count;
		struct irq_data *irq_data = irq_get_irq_data(irq);
		irq_hw_number_t hwirq;

		if (WARN_ON(!irq_data || irq_data->domain != domain))
			continue;

		hwirq = irq_data->hwirq;
		irq_set_status_flags(irq, IRQ_NOREQUEST);

		/* remove chip and handler */
		irq_set_chip_and_handler(irq, NULL, NULL);

		/* Make sure it's completed */
		synchronize_irq(irq);

		/* Tell the PIC about it */
		if (domain->ops->unmap)
			domain->ops->unmap(domain, irq);
		smp_mb();

		irq_data->domain = NULL;
		irq_data->hwirq = 0;

		/* Clear reverse map for this hwirq */
		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = 0;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
		}
	}
}

int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			      irq_hw_number_t hwirq_base, int count)
{
	unsigned int virq = irq_base;
	irq_hw_number_t hwirq = hwirq_base;
	int i, ret;

	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		struct irq_data *irq_data = irq_get_irq_data(virq + i);

		if (WARN(!irq_data, "error: irq_desc not allocated; "
			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
		if (WARN(irq_data->domain, "error: irq_desc already associated; "
			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
	}

	for (i = 0; i < count; i++, virq++, hwirq++) {
		struct irq_data *irq_data = irq_get_irq_data(virq);

		irq_data->hwirq = hwirq;
		irq_data->domain = domain;
		if (domain->ops->map) {
			ret = domain->ops->map(domain, virq, hwirq);
			if (ret != 0) {
				/*
				 * If map() returns -EPERM, this interrupt is protected
				 * by the firmware or some other service and shall not
				 * be mapped. Don't bother telling the user about it.
				 */
				if (ret != -EPERM) {
					pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
					       domain->name, hwirq, virq, ret);
				}
				irq_data->domain = NULL;
				irq_data->hwirq = 0;
				continue;
			}
			/* If not already assigned, give the domain the chip's name */
			if (!domain->name && irq_data->chip)
				domain->name = irq_data->chip->name;
		}

		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = virq;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
			mutex_unlock(&revmap_trees_mutex);
		}

		irq_clear_status_flags(virq, IRQ_NOREQUEST);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number. It still uses the linear
 * or radix tree to store the mapping, but the irq controller can optimize
 * the revmap path by using the hwirq directly.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_direct_max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_direct_max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
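
/*
 * Example (hedged sketch): a controller that can choose its own hardware
 * vector numbers programs whatever virq it is handed back;
 * foo_program_vector() and "priv" are hypothetical.
 *
 *	virq = irq_create_direct_mapping(domain);
 *	if (virq)
 *		foo_program_vector(priv, virq);
 */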

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, irq_set_irq_type() should be
 * called on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	unsigned int hint;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		pr_warning("irq_create_mapping called for NULL domain, hwirq=%lx\n",
			   hwirq);
		WARN_ON(1);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	hint = hwirq % nr_irqs;
	if (hint == 0)
		hint++;
	virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
	if (virq <= 0)
		virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(domain->of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
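
/*
 * Example (hedged sketch): mapping a known hardware line and then setting
 * its trigger, as the kernel-doc above recommends; FOO_HWIRQ_UART is a
 * hypothetical hwirq number.
 *
 *	virq = irq_create_mapping(domain, FOO_HWIRQ_UART);
 *	if (virq)
 *		irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);
 */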

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert in to the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int ret;

	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(domain->of_node));
	if (unlikely(ret < 0))
		return ret;

	ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	if (unlikely(ret < 0)) {
		irq_free_descs(irq_base, count);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
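
/*
 * Example (hedged sketch): a controller with a fixed legacy layout
 * inserting its block into an already-registered domain; the FOO_*
 * constants are hypothetical.
 *
 *	ret = irq_create_strict_mappings(domain, FOO_IRQ_BASE,
 *					 FOO_HWIRQ_BASE, FOO_NR_IRQS);
 *	if (ret)
 *		return ret;
 */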

unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_domain *domain;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	domain = controller ? irq_find_host(controller) : irq_default_domain;
	if (!domain) {
#ifdef CONFIG_MIPS
		/*
		 * Workaround to avoid breaking interrupt controller drivers
		 * that don't yet register an irq_domain.  This is temporary
		 * code. ~~~gcl, Feb 24, 2012
		 *
		 * Scheduled for removal in Linux v3.6.  That should be enough
		 * time.
		 */
		if (intsize > 0)
			return intspec[0];
#endif
		pr_warning("no irq domain found for %s !\n",
			   of_node_full_name(controller));
		return 0;
	}

	/* If domain has no translation, then we assume interrupt line */
	if (domain->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (domain->ops->xlate(domain, controller, intspec, intsize,
				     &hwirq, &type))
			return 0;
	}

	/* Create mapping */
	virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
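
/*
 * Example (hedged sketch): drivers rarely call this directly; it is the
 * backend of the OF helpers, which pass in the raw specifier from the
 * device tree, e.g.:
 *
 *	unsigned int virq = irq_of_parse_and_map(dev->of_node, 0);
 *	if (!virq)
 *		return -EINVAL;
 */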

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	irq_domain_disassociate_many(domain, virq, 1);
	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
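
/*
 * Example (hedged sketch): tearing a mapping back down on driver removal,
 * mirroring an earlier irq_create_mapping()/irq_of_parse_and_map() call;
 * "priv" is hypothetical.
 *
 *	irq_dispose_mapping(priv->virq);
 *	priv->virq = 0;
 */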

/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_get_irq_data(hwirq);
		if (data && (data->domain == domain) && (data->hwirq == hwirq))
			return hwirq;
	}

	return irq_linear_revmap(domain, hwirq);
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

/**
 * irq_linear_revmap() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a fast path that can be called directly by irq controller code to
 * save a handful of instructions.
 */
unsigned int irq_linear_revmap(struct irq_domain *domain,
			       irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Check revmap bounds; complain if exceeded */
	if (hwirq >= domain->revmap_size) {
		rcu_read_lock();
		data = radix_tree_lookup(&domain->revmap_tree, hwirq);
		rcu_read_unlock();
		return data ? data->irq : 0;
	}

	return domain->linear_revmap[hwirq];
}
EXPORT_SYMBOL_GPL(irq_linear_revmap);
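
/*
 * Example (hedged sketch): a chained flow handler demuxing a pending
 * register through the linear revmap fast path; foo_pending_hwirq() and
 * "priv" are hypothetical.
 *
 *	while ((hwirq = foo_pending_hwirq(priv)) >= 0)
 *		generic_handle_irq(irq_linear_revmap(priv->domain, hwirq));
 */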

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	struct irq_domain *domain;
	struct radix_tree_iter iter;
	void *data, **slot;
	int i;

	seq_printf(m, " %-16s  %-6s  %-10s  %-10s  %s\n",
		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(domain, &irq_domain_list, link) {
		int count = 0;
		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
			count++;
		seq_printf(m, "%c%-16s  %6u  %10u  %10u  %s\n",
			   domain == irq_default_domain ? '*' : ' ', domain->name,
			   domain->revmap_size + count, domain->revmap_size,
			   domain->revmap_direct_max_irq,
			   domain->of_node ? of_node_full_name(domain->of_node) : "");
	}
	mutex_unlock(&irq_domain_mutex);

	seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %6s  %-14s  %s\n", "irq", "hwirq",
		      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		      "active", "type", "domain");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);
		domain = desc->irq_data.domain;

		if (domain) {
			struct irq_chip *chip;
			int hwirq = desc->irq_data.hwirq;
			bool direct;

			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05x  ", hwirq);

			chip = irq_desc_get_chip(desc);
			seq_printf(m, "%-15s  ", (chip && chip->name) ? chip->name : "none");

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p  " : "  %p  ", data);

			seq_printf(m, "   %c    ", (desc->action && desc->action->handler) ? '*' : ' ');
			direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
			seq_printf(m, "%6s%-8s  ",
				   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
				   direct ? "(DIRECT)" : "");
			seq_printf(m, "%s\n", desc->irq_data.domain->name);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			const u32 *intspec, unsigned int intsize,
			irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings.  For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
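
/*
 * Example (hedged sketch): the xlate helpers above are normally wired
 * straight into a controller's irq_domain_ops; a two-cell binding
 * (hwirq plus trigger flags) would use something like the following,
 * where foo_intc_irq_map() is hypothetical:
 *
 *	static const struct irq_domain_ops foo_intc_domain_ops = {
 *		.map	= foo_intc_irq_map,
 *		.xlate	= irq_domain_xlate_twocell,
 *	};
 */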

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

#ifdef CONFIG_OF_IRQ
void irq_domain_generate_simple(const struct of_device_id *match,
				u64 phys_base, unsigned int irq_start)
{
	struct device_node *node;
	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
		(unsigned long long) phys_base, (int) irq_start);
	node = of_find_matching_node_by_address(NULL, match, phys_base);
	if (node)
		irq_domain_add_legacy(node, 32, irq_start, 0,
				      &irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
#endif