/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include "internals.h"

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
L
Linus Torvalds 已提交
20

R
Rusty Russell 已提交
21 22 23 24 25 26 27
static int init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
	cpumask_setall(irq_default_affinity);
	return 0;
}
core_initcall(init_irq_default_affinity);
/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
31
 *	@irq: interrupt number to wait for
L
Linus Torvalds 已提交
32 33 34 35 36 37 38 39 40
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
41
	struct irq_desc *desc = irq_to_desc(irq);
42
	unsigned int status;
L
Linus Torvalds 已提交
43

44
	if (!desc)
45 46
		return;

47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);
L
Linus Torvalds 已提交
64 65 66
}
EXPORT_SYMBOL(synchronize_irq);

67 68 69 70 71 72 73
/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
74
	struct irq_desc *desc = irq_to_desc(irq);
75 76 77 78 79 80 81 82 83 84 85 86 87 88

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
	    !desc->chip->set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@cpumask:	cpumask
 *
 */
89
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
90
{
91
	struct irq_desc *desc = irq_to_desc(irq);
92
	unsigned long flags;
93 94 95 96

	if (!desc->chip->set_affinity)
		return -EINVAL;

97 98
	spin_lock_irqsave(&desc->lock, flags);

99
#ifdef CONFIG_GENERIC_PENDING_IRQ
100
	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
101
		cpumask_copy(&desc->affinity, cpumask);
102
		desc->chip->set_affinity(irq, cpumask);
103 104
	} else {
		desc->status |= IRQ_MOVE_PENDING;
105
		cpumask_copy(&desc->pending_mask, cpumask);
106
	}
107
#else
108
	cpumask_copy(&desc->affinity, cpumask);
109 110
	desc->chip->set_affinity(irq, cpumask);
#endif
111 112
	desc->status |= IRQ_AFFINITY_SET;
	spin_unlock_irqrestore(&desc->lock, flags);
113 114 115
	return 0;
}

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
120
int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
121 122 123 124
{
	if (!irq_can_set_affinity(irq))
		return 0;

125 126 127 128
	/*
	 * Preserve an userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
129
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
130 131 132
		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
133 134 135 136
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

R
Rusty Russell 已提交
137
	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
138 139
set_affinity:
	desc->chip->set_affinity(irq, &desc->affinity);
140 141 142

	return 0;
}
#else
static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&desc->lock, flags);
	ret = do_irq_select_affinity(irq, desc);
	spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}

#else
static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
168 169 170
{
	return 0;
}
#endif

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
186
	struct irq_desc *desc = irq_to_desc(irq);
L
Linus Torvalds 已提交
187 188
	unsigned long flags;

189
	if (!desc)
190 191
		return;

L
Linus Torvalds 已提交
192 193 194
	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
195
		desc->chip->disable(irq);
L
Linus Torvalds 已提交
196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
215
	struct irq_desc *desc = irq_to_desc(irq);
L
Linus Torvalds 已提交
216

217
	if (!desc)
218 219
		return;

L
Linus Torvalds 已提交
220 221 222 223 224 225
	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

226 227 228 229
static void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
	switch (desc->depth) {
	case 0:
230
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
231 232 233 234 235 236 237 238 239 240 241 242 243 244
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
257
	struct irq_desc *desc = irq_to_desc(irq);
L
Linus Torvalds 已提交
258 259
	unsigned long flags;

260
	if (!desc)
261 262
		return;

L
Linus Torvalds 已提交
263
	spin_lock_irqsave(&desc->lock, flags);
264
	__enable_irq(desc, irq);
L
Linus Torvalds 已提交
265 266 267 268
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);

D
David Brownell 已提交
269
static int set_irq_wake_real(unsigned int irq, unsigned int on)
270
{
271
	struct irq_desc *desc = irq_to_desc(irq);
272 273 274 275 276 277 278 279
	int ret = -ENXIO;

	if (desc->chip->set_wake)
		ret = desc->chip->set_wake(irq, on);

	return ret;
}

280 281 282 283 284
/**
 *	set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
285 286 287 288 289 290
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
291 292 293
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
294
	struct irq_desc *desc = irq_to_desc(irq);
295
	unsigned long flags;
296
	int ret = 0;
297

298 299 300
	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
301
	spin_lock_irqsave(&desc->lock, flags);
302
	if (on) {
303 304 305 306 307 308 309
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
310 311
	} else {
		if (desc->wake_depth == 0) {
312
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
313 314 315 316 317 318 319
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
320
	}
321

322 323 324 325 326
	spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);

L
Linus Torvalds 已提交
327 328 329 330 331 332 333
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
334
	struct irq_desc *desc = irq_to_desc(irq);
L
Linus Torvalds 已提交
335 336
	struct irqaction *action;

337 338 339 340
	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
L
Linus Torvalds 已提交
341 342
		return 0;

343
	action = desc->action;
L
Linus Torvalds 已提交
344
	if (action)
345
		if (irqflags & action->flags & IRQF_SHARED)
L
Linus Torvalds 已提交
346 347 348 349 350
			action = NULL;

	return !action;
}

T
Thomas Gleixner 已提交
351 352 353 354 355 356 357 358 359 360 361
void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overriden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

D
David Brownell 已提交
362
int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
363 364 365
		unsigned long flags)
{
	int ret;
D
David Brownell 已提交
366
	struct irq_chip *chip = desc->chip;
367 368 369 370 371 372

	if (!chip || !chip->set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
373
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
374 375 376 377
				chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

378 379
	/* caller masked out all except trigger mode flags */
	ret = chip->set_type(irq, flags);
380 381

	if (ret)
382
		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
383
				(int)flags, irq, chip->set_type);
D
David Brownell 已提交
384
	else {
385 386
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
D
David Brownell 已提交
387
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
388 389
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;
D
David Brownell 已提交
390
	}
391 392 393 394

	return ret;
}

L
Linus Torvalds 已提交
395 396 397 398
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
399 400
static int
__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
L
Linus Torvalds 已提交
401 402
{
	struct irqaction *old, **p;
403
	const char *old_name = NULL;
L
Linus Torvalds 已提交
404 405
	unsigned long flags;
	int shared = 0;
406
	int ret;
L
Linus Torvalds 已提交
407

408
	if (!desc)
409 410
		return -EINVAL;

411
	if (desc->chip == &no_irq_chip)
L
Linus Torvalds 已提交
412 413 414 415 416 417
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
418
	if (new->flags & IRQF_SAMPLE_RANDOM) {
L
Linus Torvalds 已提交
419 420 421 422 423 424 425 426 427 428 429 430 431 432
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
433
	spin_lock_irqsave(&desc->lock, flags);
L
Linus Torvalds 已提交
434
	p = &desc->action;
435 436
	old = *p;
	if (old) {
437 438 439
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
440
		 * fields must have IRQF_SHARED set and the bits which
441 442
		 * set the trigger type must match.
		 */
443
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
444 445
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
446
			goto mismatch;
447
		}
448

449
#if defined(CONFIG_IRQ_PER_CPU)
450
		/* All handlers must agree on per-cpuness */
451 452
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
453 454
			goto mismatch;
#endif
L
Linus Torvalds 已提交
455 456 457 458 459 460 461 462 463 464

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	if (!shared) {
T
Thomas Gleixner 已提交
465
		irq_chip_set_defaults(desc->chip);
466 467

		/* Setup the type (level, edge polarity) if configured: */
468
		if (new->flags & IRQF_TRIGGER_MASK) {
469 470
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);
471 472 473 474 475

			if (ret) {
				spin_unlock_irqrestore(&desc->lock, flags);
				return ret;
			}
476 477
		} else
			compat_irq_chip_set_default_handler(desc);
478 479 480 481
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif
T
Thomas Gleixner 已提交
482

483
		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
484
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
485 486 487 488

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
489
			desc->chip->startup(irq);
490 491 492
		} else
			/* Undo nested disables: */
			desc->depth = 1;
493

494 495 496 497
		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

498
		/* Set default affinity mask once everything is setup */
499
		do_irq_select_affinity(irq, desc);
D
David Brownell 已提交
500 501 502 503 504 505 506 507

	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
				(int)(new->flags & IRQF_TRIGGER_MASK));
L
Linus Torvalds 已提交
508
	}
509 510 511

	*p = new;

512 513 514
	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
515 516 517 518 519 520 521 522 523 524

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq);
	}

525
	spin_unlock_irqrestore(&desc->lock, flags);
L
Linus Torvalds 已提交
526 527

	new->irq = irq;
528
	register_irq_proc(irq, desc);
L
Linus Torvalds 已提交
529 530 531 532
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;
533 534

mismatch:
535
#ifdef CONFIG_DEBUG_SHIRQ
536
	if (!(new->flags & IRQF_PROBE_SHARED)) {
537
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
538 539
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
540 541
		dump_stack();
	}
542
#endif
543
	spin_unlock_irqrestore(&desc->lock, flags);
544
	return -EBUSY;
L
Linus Torvalds 已提交
545 546
}

547 548 549 550 551 552 553 554 555 556 557 558 559 560
/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}

L
Linus Torvalds 已提交
561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
577
	struct irq_desc *desc = irq_to_desc(irq);
L
Linus Torvalds 已提交
578 579 580
	struct irqaction **p;
	unsigned long flags;

581
	WARN_ON(in_interrupt());
582 583

	if (!desc)
L
Linus Torvalds 已提交
584 585
		return;

586
	spin_lock_irqsave(&desc->lock, flags);
L
Linus Torvalds 已提交
587 588
	p = &desc->action;
	for (;;) {
589
		struct irqaction *action = *p;
L
Linus Torvalds 已提交
590 591 592 593 594 595 596 597 598 599

		if (action) {
			struct irqaction **pp = p;

			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
600

601 602
			/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
603 604
			if (desc->chip->release)
				desc->chip->release(irq, dev_id);
605
#endif
606

L
Linus Torvalds 已提交
607 608
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
609 610
				if (desc->chip->shutdown)
					desc->chip->shutdown(irq);
L
Linus Torvalds 已提交
611
				else
612
					desc->chip->disable(irq);
L
Linus Torvalds 已提交
613
			}
614
			spin_unlock_irqrestore(&desc->lock, flags);
L
Linus Torvalds 已提交
615 616 617 618
			unregister_handler_proc(irq, action);

			/* Make sure it's not being used on another CPU */
			synchronize_irq(irq);
619 620 621 622 623 624 625 626 627 628 629 630 631 632 633
#ifdef CONFIG_DEBUG_SHIRQ
			/*
			 * It's a shared IRQ -- the driver ought to be
			 * prepared for it to happen even now it's
			 * being freed, so let's make sure....  We do
			 * this after actually deregistering it, to
			 * make sure that a 'real' IRQ doesn't run in
			 * parallel with our fake
			 */
			if (action->flags & IRQF_SHARED) {
				local_irq_save(flags);
				action->handler(irq, dev_id);
				local_irq_restore(flags);
			}
#endif
L
Linus Torvalds 已提交
634 635 636
			kfree(action);
			return;
		}
637
		printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
638 639 640
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
641
		spin_unlock_irqrestore(&desc->lock, flags);
L
Linus Torvalds 已提交
642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670
		return;
	}
}
EXPORT_SYMBOL(free_irq);

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
671 672 673
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_DISABLED	Disable local interrupts while processing
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
D
David Brownell 已提交
674
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
L
Linus Torvalds 已提交
675 676
 *
 */
677
int request_irq(unsigned int irq, irq_handler_t handler,
678
		unsigned long irqflags, const char *devname, void *dev_id)
L
Linus Torvalds 已提交
679
{
680
	struct irqaction *action;
681
	struct irq_desc *desc;
682
	int retval;
L
Linus Torvalds 已提交
683

684 685 686 687 688 689 690 691 692 693 694 695
	/*
	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
	 * the _first_ irqaction (sigh).  That can cause oopsing, but
	 * the behavior is classified as "will not fix" so we need to
	 * start nudging drivers away from using that idiom.
	 */
	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
			== (IRQF_SHARED|IRQF_DISABLED))
		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
				"guaranteed on shared IRQs\n",
				irq, devname);

I
Ingo Molnar 已提交
696 697 698 699
#ifdef CONFIG_LOCKDEP
	/*
	 * Lockdep wants atomic interrupt handlers:
	 */
700
	irqflags |= IRQF_DISABLED;
I
Ingo Molnar 已提交
701
#endif
L
Linus Torvalds 已提交
702 703 704 705 706 707
	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
708
	if ((irqflags & IRQF_SHARED) && !dev_id)
L
Linus Torvalds 已提交
709
		return -EINVAL;
710

711
	desc = irq_to_desc(irq);
712
	if (!desc)
L
Linus Torvalds 已提交
713
		return -EINVAL;
714

715
	if (desc->status & IRQ_NOREQUEST)
716
		return -EINVAL;
L
Linus Torvalds 已提交
717 718 719 720 721 722 723 724 725 726 727 728 729 730
	if (!handler)
		return -EINVAL;

	action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

731
	retval = __setup_irq(irq, desc, action);
732 733 734
	if (retval)
		kfree(action);

D
David Woodhouse 已提交
735 736 737 738 739
#ifdef CONFIG_DEBUG_SHIRQ
	if (irqflags & IRQF_SHARED) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
740 741
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
D
David Woodhouse 已提交
742
		 */
743
		unsigned long flags;
D
David Woodhouse 已提交
744

745
		disable_irq(irq);
746
		local_irq_save(flags);
747

748
		handler(irq, dev_id);
749

750
		local_irq_restore(flags);
751
		enable_irq(irq);
D
David Woodhouse 已提交
752 753
	}
#endif
L
Linus Torvalds 已提交
754 755 756
	return retval;
}
EXPORT_SYMBOL(request_irq);