chip.c 20.6 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
14
#include <linux/msi.h>
15 16 17 18
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

19 20
#include <trace/events/irq.h>

21 22 23
#include "internals.h"

/**
T
Thomas Gleixner 已提交
24
 *	irq_set_chip - set the irq chip for an irq
25 26 27
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
T
Thomas Gleixner 已提交
28
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
29 30
{
	unsigned long flags;
31
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
32

33
	if (!desc)
34 35 36 37 38
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

39
	desc->irq_data.chip = chip;
40
	irq_put_desc_unlock(desc, flags);
41 42 43 44 45 46
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
47 48
	return 0;
}
T
Thomas Gleixner 已提交
49
EXPORT_SYMBOL(irq_set_chip);
50 51

/**
T
Thomas Gleixner 已提交
52
 *	irq_set_type - set the irq trigger type for an irq
53
 *	@irq:	irq number
D
David Brownell 已提交
54
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
55
 */
T
Thomas Gleixner 已提交
56
int irq_set_irq_type(unsigned int irq, unsigned int type)
57 58
{
	unsigned long flags;
59
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
60
	int ret = 0;
61

62 63
	if (!desc)
		return -EINVAL;
64

65
	type &= IRQ_TYPE_SENSE_MASK;
66
	ret = __irq_set_trigger(desc, irq, type);
67
	irq_put_desc_busunlock(desc, flags);
68 69
	return ret;
}
T
Thomas Gleixner 已提交
70
EXPORT_SYMBOL(irq_set_irq_type);
71 72

/**
T
Thomas Gleixner 已提交
73
 *	irq_set_handler_data - set irq handler data for an irq
74 75 76 77 78
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
T
Thomas Gleixner 已提交
79
int irq_set_handler_data(unsigned int irq, void *data)
80 81
{
	unsigned long flags;
82
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
83

84
	if (!desc)
85
		return -EINVAL;
86
	desc->irq_data.handler_data = data;
87
	irq_put_desc_unlock(desc, flags);
88 89
	return 0;
}
T
Thomas Gleixner 已提交
90
EXPORT_SYMBOL(irq_set_handler_data);
91

92
/**
93 94 95 96
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
97
 *
98
 *	Set the MSI descriptor entry for an irq at offset
99
 */
100 101
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
102 103
{
	unsigned long flags;
104
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
105

106
	if (!desc)
107
		return -EINVAL;
108
	desc->irq_data.msi_desc = entry;
109 110
	if (entry && !irq_offset)
		entry->irq = irq_base;
111
	irq_put_desc_unlock(desc, flags);
112 113 114
	return 0;
}

115 116 117 118 119 120 121 122 123 124 125 126
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq. Convenience wrapper
 *	around irq_set_msi_desc_off() with a zero offset.
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

127
/**
T
Thomas Gleixner 已提交
128
 *	irq_set_chip_data - set irq chip data for an irq
129 130 131 132 133
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
T
Thomas Gleixner 已提交
134
int irq_set_chip_data(unsigned int irq, void *data)
135 136
{
	unsigned long flags;
137
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
138

139
	if (!desc)
140
		return -EINVAL;
141
	desc->irq_data.chip_data = data;
142
	irq_put_desc_unlock(desc, flags);
143 144
	return 0;
}
T
Thomas Gleixner 已提交
145
EXPORT_SYMBOL(irq_set_chip_data);
146

147 148 149 150 151 152 153 154
/*
 * irq_get_irq_data - return the irq_data of the descriptor for @irq,
 * or NULL when no descriptor exists.
 */
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return NULL;
	return &desc->irq_data;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

155 156
/* Clear the IRQD_IRQ_DISABLED state bit in the descriptor's irq_data. */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

/* Set the IRQD_IRQ_DISABLED state bit in the descriptor's irq_data. */
static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

165 166
/* Clear the IRQD_IRQ_MASKED state bit in the descriptor's irq_data. */
static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

/* Set the IRQD_IRQ_MASKED state bit in the descriptor's irq_data. */
static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

175
/*
 * irq_startup - start up an interrupt
 * @desc:	descriptor of the interrupt
 * @resend:	when true, resend a pending interrupt after starting up
 *
 * Clears the disabled state and resets the disable depth. Prefers the
 * chip's ->irq_startup() callback; otherwise falls back to irq_enable().
 * Returns the ->irq_startup() result, or 0 on the fallback path.
 */
int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		/* ->irq_startup() is expected to unmask the line as well. */
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

/*
 * irq_shutdown - shut down an interrupt
 * @desc: descriptor of the interrupt
 *
 * Marks the interrupt disabled/masked and invokes the strongest chip
 * callback available: ->irq_shutdown(), else ->irq_disable(), else
 * ->irq_mask().
 */
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

206 207
/*
 * irq_enable - enable an interrupt at the hardware level
 * @desc: descriptor of the interrupt
 *
 * Uses the chip's ->irq_enable() callback if present, otherwise falls
 * back to ->irq_unmask(). Clears both the disabled and masked state.
 */
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

216
/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	/* Only touch the hardware when the chip provides a disable hook. */
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

256
static inline void mask_ack_irq(struct irq_desc *desc)
257
{
258 259
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
260
	else {
261
		desc->irq_data.chip->irq_mask(&desc->irq_data);
262 263
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
264
	}
265
	irq_state_set_masked(desc);
266 267
}

268
void mask_irq(struct irq_desc *desc)
269
{
270 271
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
272
		irq_state_set_masked(desc);
273 274 275
	}
}

276
void unmask_irq(struct irq_desc *desc)
277
{
278 279
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
280
		irq_state_clr_masked(desc);
281
	}
282 283
}

284 285 286 287 288 289 290 291 292 293 294 295 296
/*
 * unmask_threaded_irq - unmask after a threaded handler has finished
 * @desc: descriptor of the interrupt
 *
 * For chips with IRQCHIP_EOI_THREADED the EOI was deferred until the
 * thread completed; issue it here before unmasking the line.
 */
void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312
/*
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	/* Runs in thread context, so sleeping is legal here. */
	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		/* No handler or disabled: remember it for a later replay. */
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	/* Drop the lock while the thread function runs. */
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

339 340
static bool irq_check_poll(struct irq_desc *desc)
{
341
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
342 343 344 345
		return false;
	return irq_wait_for_poll(desc);
}

346 347 348 349 350 351 352 353 354 355 356 357
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* Another CPU may be handling/polling this irq already. */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		/* No action or disabled: mark pending for a later replay. */
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
381

382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

400 401 402 403 404 405 406 407 408 409
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask first: the line stays active until the device is acked. */
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	/* Unmask again unless disabled or a oneshot thread is still running. */
	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
440

441 442 443 444 445 446 447 448 449 450
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the optional per-descriptor preflow hook before the fasteoi flow. */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471
/*
 * EOI (and possibly unmask) an interrupt at the end of the fasteoi flow.
 * For threaded-EOI chips the EOI may be deferred to the irq thread.
 */
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		/* The thread will issue the EOI for IRQCHIP_EOI_THREADED. */
		chip->irq_eoi(&desc->irq_data);
	}
}

472
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	/* Oneshot irqs stay masked until the threaded handler unmasks. */
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	/* Still EOI, unless the chip only wants EOI for handled irqs. */
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq: ack so the next edge can be latched. */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			/* Action was removed meanwhile: mask and leave. */
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
588

589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

628
out_eoi:
629 630 631 632 633
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

634
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	/* Ack/eoi are optional for per-cpu chips. */
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	/* Resolve the per-cpu device id for the current CPU. */
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	/* Call the primary handler directly; no desc->lock is taken. */
	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

689
/*
 * __irq_set_handler - set the flow handler for an irq
 * @irq:	interrupt number
 * @handle:	flow handler function; NULL means uninstall (handle_bad_irq)
 * @is_chained:	the irq is chained from a demultiplexing handler
 * @name:	flow handler name shown in /proc/interrupts
 */
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		/* Refuse to install a real handler on a chipless irq. */
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		/* Chained irqs are owned by the demux handler: not for drivers. */
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
726 727

/*
 * irq_set_chip_and_handler_name - set the chip and the flow handler in one go
 * @irq:	interrupt number
 * @chip:	irq chip to install
 * @handle:	flow handler to install
 * @name:	flow handler name shown in /proc/interrupts
 */
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
R
Ralf Baechle 已提交
735

T
Thomas Gleixner 已提交
736
/*
 * irq_modify_status - clear/set irq settings and resync the irq_data flags
 * @irq:	interrupt number
 * @clr:	IRQ_* setting bits to clear
 * @set:	IRQ_* setting bits to set
 *
 * Updates the descriptor settings and then rebuilds the derived
 * IRQD_* state bits (balancing, per-cpu, trigger, level, move) so
 * irq_data stays consistent with the settings.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	/* Reset the derived flags, then re-set those the settings imply. */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * IRQCHIP_ONOFFLINE_ENABLED chips only get the callback
		 * while the irq is not disabled.
		 */
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * IRQCHIP_ONOFFLINE_ENABLED chips only get the callback
		 * while the irq is not disabled.
		 */
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}