chip.c 20.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
14
#include <linux/msi.h>
15 16 17 18
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

19 20
#include <trace/events/irq.h>

21 22 23
#include "internals.h"

/**
T
Thomas Gleixner 已提交
24
 *	irq_set_chip - set the irq chip for an irq
25 26 27
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
T
Thomas Gleixner 已提交
28
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
29 30
{
	unsigned long flags;
31
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
32

33
	if (!desc)
34 35 36 37 38
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

39
	desc->irq_data.chip = chip;
40
	irq_put_desc_unlock(desc, flags);
41 42
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
43
	 * allocated_irqs.
44
	 */
45
	irq_mark_irq(irq);
46 47
	return 0;
}
T
Thomas Gleixner 已提交
48
EXPORT_SYMBOL(irq_set_chip);
49 50

/**
T
Thomas Gleixner 已提交
51
 *	irq_set_type - set the irq trigger type for an irq
52
 *	@irq:	irq number
D
David Brownell 已提交
53
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
54
 */
T
Thomas Gleixner 已提交
55
int irq_set_irq_type(unsigned int irq, unsigned int type)
56 57
{
	unsigned long flags;
58
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
59
	int ret = 0;
60

61 62
	if (!desc)
		return -EINVAL;
63

64
	type &= IRQ_TYPE_SENSE_MASK;
65
	ret = __irq_set_trigger(desc, irq, type);
66
	irq_put_desc_busunlock(desc, flags);
67 68
	return ret;
}
T
Thomas Gleixner 已提交
69
EXPORT_SYMBOL(irq_set_irq_type);
70 71

/**
T
Thomas Gleixner 已提交
72
 *	irq_set_handler_data - set irq handler data for an irq
73 74 75 76 77
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
T
Thomas Gleixner 已提交
78
int irq_set_handler_data(unsigned int irq, void *data)
79 80
{
	unsigned long flags;
81
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
82

83
	if (!desc)
84
		return -EINVAL;
85
	desc->irq_data.handler_data = data;
86
	irq_put_desc_unlock(desc, flags);
87 88
	return 0;
}
T
Thomas Gleixner 已提交
89
EXPORT_SYMBOL(irq_set_handler_data);
90

91
/**
92 93 94 95
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
96
 *
97
 *	Set the MSI descriptor entry for an irq at offset
98
 */
99 100
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
101 102
{
	unsigned long flags;
103
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
104

105
	if (!desc)
106
		return -EINVAL;
107
	desc->irq_data.msi_desc = entry;
108 109
	if (entry && !irq_offset)
		entry->irq = irq_base;
110
	irq_put_desc_unlock(desc, flags);
111 112 113
	return 0;
}

114 115 116 117 118 119 120 121 122 123 124 125
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq. Convenience wrapper
 *	around irq_set_msi_desc_off() with a zero offset.
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

126
/**
T
Thomas Gleixner 已提交
127
 *	irq_set_chip_data - set irq chip data for an irq
128 129 130 131 132
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
T
Thomas Gleixner 已提交
133
int irq_set_chip_data(unsigned int irq, void *data)
134 135
{
	unsigned long flags;
136
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
137

138
	if (!desc)
139
		return -EINVAL;
140
	desc->irq_data.chip_data = data;
141
	irq_put_desc_unlock(desc, flags);
142 143
	return 0;
}
T
Thomas Gleixner 已提交
144
EXPORT_SYMBOL(irq_set_chip_data);
145

146 147 148 149 150 151 152 153
/* Return the irq_data for @irq, or NULL when no descriptor exists. */
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return NULL;

	return &desc->irq_data;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

154 155
static void irq_state_clr_disabled(struct irq_desc *desc)
{
156
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
157 158 159 160
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
161
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
162 163
}

164 165
static void irq_state_clr_masked(struct irq_desc *desc)
{
166
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
167 168 169 170
}

static void irq_state_set_masked(struct irq_desc *desc)
{
171
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
172 173
}

174
/*
 * Start up an interrupt: clear the disabled state, reset the disable
 * depth and invoke the chip's startup callback (falling back to a
 * plain enable). Optionally resend a pending interrupt.
 */
int irq_startup(struct irq_desc *desc, bool resend)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (chip->irq_startup) {
		ret = chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
194
	irq_state_set_disabled(desc);
195
	desc->depth = 1;
T
Thomas Gleixner 已提交
196 197
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
198
	else if (desc->irq_data.chip->irq_disable)
T
Thomas Gleixner 已提交
199 200 201
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
202
	irq_state_set_masked(desc);
203 204
}

205 206
void irq_enable(struct irq_desc *desc)
{
207
	irq_state_clr_disabled(desc);
T
Thomas Gleixner 已提交
208 209 210 211
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
212
	irq_state_clr_masked(desc);
213 214
}

215
/**
216
 * irq_disable - Mark interrupt disabled
217 218 219 220 221 222 223 224 225 226 227
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
T
Thomas Gleixner 已提交
228
void irq_disable(struct irq_desc *desc)
229
{
230
	irq_state_set_disabled(desc);
T
Thomas Gleixner 已提交
231 232
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
233
		irq_state_set_masked(desc);
T
Thomas Gleixner 已提交
234
	}
235 236
}

237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

255
static inline void mask_ack_irq(struct irq_desc *desc)
256
{
257 258
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
259
	else {
260
		desc->irq_data.chip->irq_mask(&desc->irq_data);
261 262
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
263
	}
264
	irq_state_set_masked(desc);
265 266
}

267
void mask_irq(struct irq_desc *desc)
268
{
269 270
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
271
		irq_state_set_masked(desc);
272 273 274
	}
}

275
void unmask_irq(struct irq_desc *desc)
276
{
277 278
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
279
		irq_state_clr_masked(desc);
280
	}
281 282
}

283 284 285 286 287 288 289 290 291 292 293 294 295
/*
 * Unmask after a threaded handler completed; issue the deferred EOI
 * first when the chip requested threaded EOI handling.
 */
void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (!chip->irq_unmask)
		return;

	chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311
/*
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

312
	raw_spin_lock_irq(&desc->lock);
313

314
	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
315 316 317
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
318 319
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
320
		goto out_unlock;
321
	}
322

323
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
324
	raw_spin_unlock_irq(&desc->lock);
325 326 327 328 329

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

330
	raw_spin_lock_irq(&desc->lock);
331
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
332 333

out_unlock:
334
	raw_spin_unlock_irq(&desc->lock);
335 336 337
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

338 339
static bool irq_check_poll(struct irq_desc *desc)
{
340
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
341 342 343 344
		return false;
	return irq_wait_for_poll(desc);
}

345 346
static bool irq_may_run(struct irq_desc *desc)
{
347 348 349 350 351 352 353
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
354
		return true;
355 356 357 358 359 360 361 362 363 364 365 366

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
367 368 369
	return irq_check_poll(desc);
}

370 371 372 373 374 375 376 377 378 379 380 381
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
382
void
383
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
384
{
385
	raw_spin_lock(&desc->lock);
386

387 388
	if (!irq_may_run(desc))
		goto out_unlock;
389

390
	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
T
Thomas Gleixner 已提交
391
	kstat_incr_irqs_this_cpu(irq, desc);
392

393 394
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
395
		goto out_unlock;
396
	}
397

398
	handle_irq_event(desc);
399 400

out_unlock:
401
	raw_spin_unlock(&desc->lock);
402
}
403
EXPORT_SYMBOL_GPL(handle_simple_irq);
404

405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 *
 * Unmask when:
 * - it is a standard level irq (IRQF_ONESHOT not set), or
 * - it is a oneshot irq whose thread was not woken (spurious, or the
 *   primary handler dealt with it completely).
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	if (irqd_irq_disabled(&desc->irq_data))
		return;
	if (!irqd_irq_masked(&desc->irq_data) || desc->threads_oneshot)
		return;

	unmask_irq(desc);
}

423 424 425 426 427 428 429 430 431 432
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
433
void
434
handle_level_irq(unsigned int irq, struct irq_desc *desc)
435
{
436
	raw_spin_lock(&desc->lock);
437
	mask_ack_irq(desc);
438

439 440
	if (!irq_may_run(desc))
		goto out_unlock;
441

442
	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
T
Thomas Gleixner 已提交
443
	kstat_incr_irqs_this_cpu(irq, desc);
444 445 446 447 448

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
449 450
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
451
		goto out_unlock;
452
	}
453

454
	handle_irq_event(desc);
T
Thomas Gleixner 已提交
455

456 457
	cond_unmask_irq(desc);

458
out_unlock:
459
	raw_spin_unlock(&desc->lock);
460
}
461
EXPORT_SYMBOL_GPL(handle_level_irq);
462

463 464 465 466 467 468 469 470 471 472
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the optional architecture supplied preflow handler, if set. */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

494
/**
495
 *	handle_fasteoi_irq - irq handler for transparent controllers
496 497 498
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
499
 *	Only a single callback will be issued to the chip: an ->eoi()
500 501 502 503
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
504
void
505
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
506
{
507 508
	struct irq_chip *chip = desc->irq_data.chip;

509
	raw_spin_lock(&desc->lock);
510

511 512
	if (!irq_may_run(desc))
		goto out;
513

514
	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
T
Thomas Gleixner 已提交
515
	kstat_incr_irqs_this_cpu(irq, desc);
516 517 518

	/*
	 * If its disabled or no action available
519
	 * then mask it and get out of here:
520
	 */
521
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
522
		desc->istate |= IRQS_PENDING;
523
		mask_irq(desc);
524
		goto out;
525
	}
526 527 528 529

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

530
	preflow_handler(desc);
531
	handle_irq_event(desc);
532

533
	cond_unmask_eoi_irq(desc, chip);
534

535
	raw_spin_unlock(&desc->lock);
536 537
	return;
out:
538 539 540
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
541
}
V
Vincent Stehlé 已提交
542
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
543 544 545 546 547 548 549

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occures on the falling and/or rising edge of a hardware
L
Lucas De Marchi 已提交
550
 *	signal. The occurrence is latched into the irq controller hardware
551 552
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
553
 *	is handled by the associated event handler. If this happens it
554 555 556 557 558 559
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
560
void
561
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
562
{
563
	raw_spin_lock(&desc->lock);
564

565
	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
566

567 568 569 570
	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
571
	}
572

573
	/*
574 575
	 * If its disabled or no action available then mask it and get
	 * out of here.
576
	 */
577 578 579 580
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
581
	}
582

T
Thomas Gleixner 已提交
583
	kstat_incr_irqs_this_cpu(irq, desc);
584 585

	/* Start handling the irq */
586
	desc->irq_data.chip->irq_ack(&desc->irq_data);
587 588

	do {
589
		if (unlikely(!desc->action)) {
590
			mask_irq(desc);
591 592 593 594 595 596 597 598
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Renable it, if it was not disabled in meantime.
		 */
599
		if (unlikely(desc->istate & IRQS_PENDING)) {
600 601
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
602
				unmask_irq(desc);
603 604
		}

605
		handle_irq_event(desc);
606

607
	} while ((desc->istate & IRQS_PENDING) &&
608
		 !irqd_irq_disabled(&desc->irq_data));
609 610

out_unlock:
611
	raw_spin_unlock(&desc->lock);
612
}
613
EXPORT_SYMBOL(handle_edge_irq);
614

615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/* Disabled or no handler installed: record the edge as pending. */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

663
/**
L
Liuweni 已提交
664
 *	handle_percpu_irq - Per CPU local irq handler
665 666 667 668 669
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
670
void
671
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
672
{
673
	struct irq_chip *chip = irq_desc_get_chip(desc);
674

T
Thomas Gleixner 已提交
675
	kstat_incr_irqs_this_cpu(irq, desc);
676

677 678
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);
679

680
	handle_irq_event_percpu(desc, desc->action);
681

682 683
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
684 685
}

686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
702
	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
703 704 705 706 707 708 709 710 711 712 713 714 715 716 717
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

718
void
719
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
720
		  const char *name)
721 722
{
	unsigned long flags;
723
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
724

725
	if (!desc)
726 727
		return;

728
	if (!handle) {
729
		handle = handle_bad_irq;
730 731
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
732
			goto out;
733
	}
734 735 736

	/* Uninstall? */
	if (handle == handle_bad_irq) {
737
		if (desc->irq_data.chip != &no_irq_chip)
738
			mask_ack_irq(desc);
739
		irq_state_set_disabled(desc);
740 741 742
		desc->depth = 1;
	}
	desc->handle_irq = handle;
743
	desc->name = name;
744 745

	if (handle != handle_bad_irq && is_chained) {
746 747
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
748
		irq_settings_set_nothread(desc);
749
		irq_startup(desc, true);
750
	}
751 752
out:
	irq_put_desc_busunlock(desc, flags);
753
}
754
EXPORT_SYMBOL_GPL(__irq_set_handler);
755 756

void
757
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
758
			      irq_flow_handler_t handle, const char *name)
759
{
760
	irq_set_chip(irq, chip);
761
	__irq_set_handler(irq, handle, 0, name);
762
}
763
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
R
Ralf Baechle 已提交
764

T
Thomas Gleixner 已提交
765
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
R
Ralf Baechle 已提交
766 767
{
	unsigned long flags;
768
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
R
Ralf Baechle 已提交
769

T
Thomas Gleixner 已提交
770
	if (!desc)
R
Ralf Baechle 已提交
771
		return;
772 773
	irq_settings_clr_and_set(desc, clr, set);

774
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
775
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
776 777 778 779
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
780 781
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
T
Thomas Gleixner 已提交
782 783
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);
784

785 786
	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

787
	irq_put_desc_unlock(desc, flags);
R
Ralf Baechle 已提交
788
}
789
EXPORT_SYMBOL_GPL(irq_modify_status);
790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_chip *chip;
		unsigned long flags;

		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/* Skip disabled irqs unless the chip opted in via flag. */
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_chip *chip;
		unsigned long flags;

		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/* Skip disabled irqs unless the chip opted in via flag. */
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}