irq.c 27.4 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5
/*
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
S
Stephen Rothwell 已提交
6 7
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
L
Linus Torvalds 已提交
8 9
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
S
Stephen Rothwell 已提交
10
 *
L
Linus Torvalds 已提交
11 12 13 14 15 16 17 18 19 20
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
S
Stephen Rothwell 已提交
21 22 23 24 25 26 27 28
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
L
Linus Torvalds 已提交
29 30
 */

31 32
#undef DEBUG

L
Linus Torvalds 已提交
33 34 35 36 37
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
S
Stephen Rothwell 已提交
38
#include <linux/ptrace.h>
L
Linus Torvalds 已提交
39 40 41 42 43 44 45
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
S
Stephen Rothwell 已提交
46 47
#include <linux/seq_file.h>
#include <linux/cpumask.h>
L
Linus Torvalds 已提交
48 49
#include <linux/profile.h>
#include <linux/bitops.h>
50 51 52 53
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
J
Jake Moilanen 已提交
54
#include <linux/pci.h>
55
#include <linux/debugfs.h>
56 57
#include <linux/of.h>
#include <linux/of_irq.h>
L
Linus Torvalds 已提交
58 59 60 61 62 63 64 65 66 67

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
68
#include <asm/udbg.h>
69
#include <asm/dbell.h>
70
#include <asm/smp.h>
71

72
#ifdef CONFIG_PPC64
L
Linus Torvalds 已提交
73
#include <asm/paca.h>
74
#include <asm/firmware.h>
75
#include <asm/lv1call.h>
S
Stephen Rothwell 已提交
76
#endif
77 78
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
L
Linus Torvalds 已提交
79

80 81 82
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

83
int __irq_offset_value;
S
Stephen Rothwell 已提交
84 85

#ifdef CONFIG_PPC32
86 87
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;
S
Stephen Rothwell 已提交
88 89 90 91 92

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
93
#endif /* CONFIG_PPC32 */
S
Stephen Rothwell 已提交
94 95

#ifdef CONFIG_PPC64
96 97

#ifndef CONFIG_SPARSE_IRQ
L
Linus Torvalds 已提交
98
EXPORT_SYMBOL(irq_desc);
99
#endif
L
Linus Torvalds 已提交
100 101

int distribute_irqs = 1;
102

S
Steven Rostedt 已提交
103
/*
 * Read paca->hard_enabled directly through r13 (the paca pointer register)
 * with a single lbz, so the compiler cannot cache the paca address in a
 * scratch register across a preemption to another CPU.
 */
static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

S
Steven Rostedt 已提交
113
/*
 * Store @enable into paca->soft_enabled through r13 with a single stb,
 * again avoiding any intermediate register the compiler might hold
 * across a preemption to a different CPU.
 */
static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

D
David Howells 已提交
119
/*
 * Restore the soft-interrupt-enable state for ppc64's lazy interrupt
 * disabling.  When re-enabling (@en != 0) this also replays anything
 * that arrived while we were soft-disabled (iSeries events, doorbells,
 * an expired decrementer) and finally hard-enables in the MSR.
 */
notrace void arch_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

#ifdef CONFIG_PPC_STD_MMU_64
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here?  Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it?  And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
	/* Check for pending doorbell interrupts and resend to ourself */
	doorbell_check_self();
#endif

	/*
	 * Need to hard-enable interrupts here.  Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;
#ifndef CONFIG_BOOKE
	/* On server, re-trigger the decrementer if it went negative since
	 * some processors only trigger on edge transitions of the sign bit.
	 *
	 * BookE has a level sensitive decrementer (latches in TSR) so we
	 * don't need that
	 */
	if ((int)mfspr(SPRN_DEC) < 0)
		mtspr(SPRN_DEC, 1);
#endif /* CONFIG_BOOKE */

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp;
		lv1_get_version_info(&tmp);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
S
Stephen Rothwell 已提交
196
#endif /* CONFIG_PPC64 */
L
Linus Torvalds 已提交
197

198
int arch_show_interrupts(struct seq_file *p, int prec)
199 200 201 202 203 204 205 206 207 208 209 210
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

211 212 213 214 215
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");

216 217 218 219 220
	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

221 222 223 224 225 226 227 228 229 230
	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

231 232 233
	return 0;
}

234 235 236 237 238 239 240 241 242
/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
243
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
244 245 246 247

	return sum;
}

L
Linus Torvalds 已提交
248
#ifdef CONFIG_HOTPLUG_CPU
249
void fixup_irqs(const struct cpumask *map)
L
Linus Torvalds 已提交
250
{
M
Michael Ellerman 已提交
251
	struct irq_desc *desc;
L
Linus Torvalds 已提交
252 253
	unsigned int irq;
	static int warned;
254
	cpumask_var_t mask;
L
Linus Torvalds 已提交
255

256
	alloc_cpumask_var(&mask, GFP_KERNEL);
L
Linus Torvalds 已提交
257

258
	for_each_irq(irq) {
259
		struct irq_data *data;
260 261
		struct irq_chip *chip;

M
Michael Ellerman 已提交
262
		desc = irq_to_desc(irq);
263 264 265
		if (!desc)
			continue;

266 267
		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
L
Linus Torvalds 已提交
268 269
			continue;

270
		chip = irq_data_get_irq_chip(data);
271

272
		cpumask_and(mask, data->affinity, map);
273
		if (cpumask_any(mask) >= nr_cpu_ids) {
L
Linus Torvalds 已提交
274
			printk("Breaking affinity for irq %i\n", irq);
275
			cpumask_copy(mask, map);
L
Linus Torvalds 已提交
276
		}
277
		if (chip->irq_set_affinity)
278
			chip->irq_set_affinity(data, mask, true);
M
Michael Ellerman 已提交
279
		else if (desc->action && !(warned++))
L
Linus Torvalds 已提交
280 281 282
			printk("Cannot set affinity for irq %i\n", irq);
	}

283 284
	free_cpumask_var(mask);

L
Linus Torvalds 已提交
285 286 287 288 289 290
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306
/*
 * Dispatch a single decoded interrupt, first switching to this CPU's
 * dedicated hardirq stack (unless we are already running on it).
 */
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		generic_handle_irq(irq);
		return;
	}

	desc = irq_to_desc(irq);
	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	/* Point the stack-limit check at the irq stack while we use it */
	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347
/*
 * Warn (with a backtrace) when the kernel stack is nearly exhausted.
 * Compiled away unless CONFIG_DEBUG_STACKOVERFLOW is set.
 */
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Offset of the stack pointer within its THREAD_SIZE region */
	long sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

L
Linus Torvalds 已提交
348 349
/*
 * Main external-interrupt entry point.  Queries the platform for the
 * interrupt number, dispatches it (or counts it as spurious), and on
 * iSeries replays a deferred decrementer event afterwards.
 */
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	/* Ask the platform which interrupt fired */
	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	/* Replay a decrementer event the hypervisor latched for us */
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif

	trace_irq_exit(regs);
}
L
Linus Torvalds 已提交
380 381 382

/*
 * Boot-time interrupt setup: platform PIC init, then the exception-level
 * and soft/hard irq stacks.
 */
void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

391 392 393 394 395 396 397 398
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

/*
 * Initialize the per-CPU exception-level stacks (critical, and on BookE
 * also debug and machine-check), indexed by hardware CPU id.  The
 * machine-check stack starts with HARDIRQ_OFFSET so it counts as
 * hardirq context from the start.
 */
void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, hw_cpu;

	for_each_possible_cpu(i) {
		hw_cpu = get_hard_smp_processor_id(i);
		memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE);
		tp = critirq_ctx[hw_cpu];
		tp->cpu = i;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE);
		tp = dbgirq_ctx[hw_cpu];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[hw_cpu];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif
L
Linus Torvalds 已提交
422

423 424
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
L
Linus Torvalds 已提交
425 426 427 428 429 430

/*
 * Initialize the per-CPU softirq and hardirq stacks.  The hardirq stack
 * starts at HARDIRQ_OFFSET so code running on it is seen as hardirq
 * context.
 */
void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

444 445 446
/*
 * Run the softirq processing on this CPU's dedicated softirq stack,
 * temporarily re-pointing the stack-overflow limit at that stack.
 */
static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;
}
L
Linus Torvalds 已提交
458 459 460 461 462 463 464 465 466 467

/*
 * Process pending softirqs on the dedicated softirq stack.  A no-op
 * when called from interrupt context; the generic softirq code handles
 * that case itself.
 */
void do_softirq(void)
{
	unsigned long irqflags;

	if (in_interrupt())
		return;

	local_irq_save(irqflags);
	if (local_softirq_pending())
		do_softirq_onstack();
	local_irq_restore(irqflags);
}


/*
476
 * IRQ controller and virtual interrupts
L
Linus Torvalds 已提交
477 478
 */

479
static LIST_HEAD(irq_hosts);
480
static DEFINE_RAW_SPINLOCK(irq_big_lock);
481
static unsigned int revmap_trees_allocated;
482
static DEFINE_MUTEX(revmap_trees_mutex);
483 484 485
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;
L
Linus Torvalds 已提交
486

487 488 489 490 491 492
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

493 494 495 496 497
/*
 * Default host match callback: a host matches exactly when its device
 * node is the (non-NULL) node being looked up.
 */
static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
	return np != NULL && np == h->of_node;
}

498
/*
 * Allocate and register an irq_host of the given @revmap_type.  For a
 * linear map, @revmap_arg is the reverse-map size (table allocated
 * inline after the structure).  @inval_irq is the host's "no interrupt"
 * hardware number.  Returns NULL on allocation failure or if a legacy
 * controller is already registered.  May be called before slab is up
 * (uses zalloc_maybe_bootmem).
 */
struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use irq 0 host pointer for that
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
			/* If we are early boot, we can't free the structure,
			 * too bad...
			 * this will be fixed once slab is made available early
			 * instead of the current cruft
			 */
			if (mem_init_done) {
				of_node_put(host->of_node);
				kfree(host);
			}
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch(revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* setup us as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Clear norequest flags */
			irq_clear_status_flags(i, IRQ_NOREQUEST);

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitly change them
			 */
			ops->map(host, i, i);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		/* publish size before the table pointer (paired readers) */
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}

590
struct irq_host *irq_find_host(struct device_node *node)
L
Linus Torvalds 已提交
591
{
592 593 594 595 596 597 598 599
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
600
	raw_spin_lock_irqsave(&irq_big_lock, flags);
601
	list_for_each_entry(h, &irq_hosts, link)
602
		if (h->ops->match(h, node)) {
603 604 605
			found = h;
			break;
		}
606
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
607 608 609 610 611 612 613
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

/*
 * Set the host used when mapping functions are called with a NULL host
 * (see irq_create_mapping() and friends).
 */
void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}
L
Linus Torvalds 已提交
617

618 619 620
/*
 * Limit the number of usable virtual irq numbers.  Must stay at least
 * NUM_ISA_INTERRUPTS; values >= NR_IRQS are silently ignored.
 */
void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

627 628 629
/*
 * Bind virtual irq @virq to hardware number @hwirq on @host: allocate
 * the irq descriptor, record the hw number in irq_map, and call the
 * host's map() hook.  On any failure the virq (and descriptor, if it
 * was allocated) is released again.  Returns 0 on success, -1 on error.
 */
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			    irq_hw_number_t hwirq)
{
	int res;

	res = irq_alloc_desc_at(virq, 0);
	if (res != virq) {
		pr_debug("irq: -> allocating desc failed\n");
		goto error;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		goto errdesc;
	}

	return 0;

errdesc:
	irq_free_descs(virq, 1);
error:
	irq_free_virt(virq, 1);
	return -1;
}
658

659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682
/*
 * Create a mapping for a NOMAP host, where the hardware number is
 * simply the allocated virq itself.  Returns NO_IRQ on failure.
 */
unsigned int irq_create_direct_mapping(struct irq_host *host)
{
	unsigned int virq;

	if (!host)
		host = irq_default_host;

	BUG_ON(!host);
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

	virq = irq_alloc_virt(host, 1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	/* hwirq == virq for direct mappings */
	return irq_setup_virq(host, virq, virq) ? NO_IRQ : virq;
}

683
/*
 * Map hardware interrupt @hwirq on @host (or the default host if NULL)
 * to a virtual irq number, creating the mapping if it does not already
 * exist.  Legacy hosts map 1:1 within the ISA range.  Returns the virq,
 * or NO_IRQ on failure.
 */
unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if mapping already exist, if it does, call
	 * host->ops->map() to update the flags
	 */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		if (host->ops->remap)
			host->ops->remap(host, virq, hwirq);
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
		hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

739
unsigned int irq_create_of_mapping(struct device_node *controller,
740
				   const u32 *intspec, unsigned int intsize)
741 742 743
{
	struct irq_host *host;
	irq_hw_number_t hwirq;
744 745
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;
L
Linus Torvalds 已提交
746

747 748 749 750
	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
751 752 753
	if (host == NULL) {
		printk(KERN_WARNING "irq: no irq host found for %s !\n",
		       controller->full_name);
754
		return NO_IRQ;
755
	}
756 757 758 759 760 761

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
762
				     &hwirq, &type))
763
			return NO_IRQ;
L
Linus Torvalds 已提交
764
	}
765

766 767 768 769 770 771 772
	/* Create mapping */
	virq = irq_create_mapping(host, hwirq);
	if (virq == NO_IRQ)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
773
	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
774
		irq_set_irq_type(virq, type);
775
	return virq;
L
Linus Torvalds 已提交
776
}
777
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
L
Linus Torvalds 已提交
778

779 780
/*
 * Tear down the mapping for @virq: detach chip/handler, wait for any
 * in-flight handler, notify the host, scrub the reverse map, and free
 * the descriptor and virq.  Legacy mappings are never unmapped.
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	host = irq_map[virq].host;
	WARN_ON (host == NULL);
	if (host == NULL)
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	/* remove chip and handler */
	irq_set_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch(host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		/*
		 * Check if radix tree allocated yet, if not then nothing to
		 * remove.
		 */
		smp_rmb();
		if (revmap_trees_allocated < 1)
			break;
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	irq_set_status_flags(virq, IRQ_NOREQUEST);

	irq_free_descs(virq, 1);
	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
L
Linus Torvalds 已提交
839

840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870
/*
 * Slow-path lookup of the virq mapped to @hwirq on @host (or the
 * default host): a linear scan of irq_map starting at the allocation
 * hint and wrapping around the non-ISA range.  Legacy hosts are 1:1.
 * Returns NO_IRQ when no mapping exists.
 */
unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if nececssary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/* legacy -> bail early */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint < NUM_ISA_INTERRUPTS)
		hint = NUM_ISA_INTERRUPTS;
	i = hint;
	do  {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = NUM_ISA_INTERRUPTS;
	} while(i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
L
Linus Torvalds 已提交
871

872

873 874
/*
 * Fast reverse lookup for tree-type hosts: resolve @hwirq to a virq via
 * the host's radix tree, falling back to the linear scan while the
 * trees are not yet initialized or when the entry is missing.
 */
unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists and has been initialized.
	 * If not, we fallback to slow mode
	 */
	if (revmap_trees_allocated < 2)
		return irq_find_mapping(host, hwirq);

	/* Now try to resolve */
	/*
	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
	 * as it's referencing an entry in the static irq_map table.
	 */
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

	/*
	 * If found in radix tree, then fine.
	 * Else fallback to linear lookup - this should not happen in practice
	 * as it means that we failed to insert the node in the radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}

/*
 * Record the hwirq->virq mapping in a tree-type host's radix tree.  A
 * no-op while the trees are not yet allocated; irq_late_init() inserts
 * such early mappings afterwards.
 */
void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists yet.
	 * If not, then the irq will be inserted into the tree when it gets
	 * initialized.
	 */
	smp_rmb();
	if (revmap_trees_allocated < 1)
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}

931 932
/*
 * Fast reverse lookup for linear-type hosts: index straight into the
 * revmap table, lazily filling missing entries (and handling
 * out-of-range hwirqs or a not-yet-allocated table) via the slow path.
 */
unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}

954 955 956 957 958 959
/*
 * Allocate @count consecutive virtual irq numbers for @host from the
 * non-ISA range, under irq_big_lock.  @hint is tried first for single
 * allocations.  Entries are claimed by setting irq_map[].host; hwirq is
 * parked at the host's invalid value.  Returns the first virq or NO_IRQ.
 */
unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
		return NO_IRQ;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;

		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}

/*
 * Release @count virtual irq numbers starting at @virq, under
 * irq_big_lock: park hwirq at the owning host's invalid value and clear
 * the host pointer.  ISA-range virqs are never freed.
 */
void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON (virq < NUM_ISA_INTERRUPTS);
	WARN_ON (count == 0 || (virq + count) > irq_virq_count);

	raw_spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		/* NOTE(review): the second clause '(virq + count) >
		 * irq_virq_count' is loop-invariant; it looks like it was
		 * meant to be 'i >= irq_virq_count' — confirm before
		 * changing. */
		if (i < NUM_ISA_INTERRUPTS ||
		    (virq + count) > irq_virq_count)
			continue;

		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}
1024

1025
/* Nothing to do early on powerpc; hook required by the generic irq code. */
int arch_early_irq_init(void)
{
	return 0;
}

/* We need to create the radix trees late */
/*
 * Once slab is available: initialize each tree-host's radix tree, then
 * back-fill it with the mappings created before the trees existed.
 * revmap_trees_allocated advances 0 -> 1 -> 2 (with write barriers) so
 * lock-free readers know how far initialization has progressed.
 */
static int irq_late_init(void)
{
	struct irq_host *h;
	unsigned int i;

	/*
	 * No mutual exclusion with respect to accessors of the tree is needed
	 * here as the synchronization is done via the state variable
	 * revmap_trees_allocated.
	 */
	list_for_each_entry(h, &irq_hosts, link) {
		if (h->revmap_type == IRQ_HOST_MAP_TREE)
			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
	}

	/*
	 * Make sure the radix trees inits are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 1;

	/*
	 * Insert the reverse mapping for those interrupts already present
	 * in irq_map[].
	 */
	mutex_lock(&revmap_trees_mutex);
	for (i = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host &&
		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
					  irq_map[i].hwirq, &irq_map[i]);
	}
	mutex_unlock(&revmap_trees_mutex);

	/*
	 * Make sure the radix trees insertions are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 2;

	return 0;
}
arch_initcall(irq_late_init);

1077 1078 1079 1080
#ifdef CONFIG_VIRQ_DEBUG
/*
 * seq_file show routine for the virq_mapping debugfs file: one line per
 * active (handler-bearing) virq with its hw number, chip name and host
 * device-tree node.
 */
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	static const char none[] = "none";
	int i;

	seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
		      "chip name", "host name");

	/* virq 0 is never used, start at 1 */
	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05lx  ", virq_to_hw(i));

			chip = irq_desc_get_chip(desc);
			if (chip && chip->name)
				p = chip->name;
			else
				p = none;
			seq_printf(m, "%-15s  ", p);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

/* debugfs open hook: wire virq_debug_show through the seq_file single API. */
static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

/* File operations for the read-only virq_mapping debugfs file. */
static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Expose the virq<->hwirq table at <debugfs>/powerpc/virq_mapping. */
static int __init irq_debugfs_init(void)
{
	struct dentry *d;

	d = debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
				NULL, &virq_debug_fops);
	return d ? 0 : -ENOMEM;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

1145
#ifdef CONFIG_PPC64
L
Linus Torvalds 已提交
1146 1147 1148 1149 1150 1151 1152
/* "noirqdistrib" boot parameter: keep all IRQs on the boot CPU. */
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
S
Stephen Rothwell 已提交
1153
#endif /* CONFIG_PPC64 */