/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
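/*
 * Example (hypothetical driver code, not part of this file): a sketch of
 * installing a minimal chip for a board interrupt line. "my_chip" and the
 * my_* callbacks are illustrative names only.
 *
 *	static struct irq_chip my_chip = {
 *		.name		= "MYCHIP",
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *		.irq_ack	= my_ack,
 *	};
 *
 *	if (irq_set_chip(irq, &my_chip))
 *		pr_err("irq %u: no descriptor\n", irq);
 */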

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
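/*
 * Example (hypothetical caller, not part of this file): a sketch of
 * selecting a rising-edge trigger before the line is requested.
 *
 *	if (irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING))
 *		pr_warn("irq %u: cannot set trigger type\n", irq);
 */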

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
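/*
 * Example (hypothetical chip implementation, not part of this file):
 * chip_data typically carries the controller's private state, which the
 * chip callbacks retrieve again via irq_data_get_irq_chip_data().
 * "my_ctrl", "struct my_ctrl_state" and MY_MASK_REG are illustrative.
 *
 *	irq_set_chip_data(irq, my_ctrl);
 *
 *	static void my_mask(struct irq_data *d)
 *	{
 *		struct my_ctrl_state *ctrl = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), ctrl->base + MY_MASK_REG);
 *	}
 */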

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	irq_domain_activate_irq(&desc->irq_data);
	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_domain_deactivate_irq(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

315
	raw_spin_lock_irq(&desc->lock);
316

317
	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
318 319 320
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
321 322
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
323
		goto out_unlock;
324
	}
325

326
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
327
	raw_spin_unlock_irq(&desc->lock);
328 329 330 331 332

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

333
	raw_spin_lock_irq(&desc->lock);
334
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
335 336

out_unlock:
337
	raw_spin_unlock_irq(&desc->lock);
338 339 340
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
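/*
 * Example (hypothetical I2C GPIO expander driver, not part of this file):
 * a threaded parent handler demultiplexes child interrupts and invokes
 * handle_nested_irq() for each pending one. "read_pending_bits()" and
 * "child_irq_base" are illustrative only.
 *
 *	static irqreturn_t expander_irq_thread(int irq, void *dev_id)
 *	{
 *		unsigned long pending = read_pending_bits(dev_id);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 8)
 *			handle_nested_irq(child_irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */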

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
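/*
 * Example (hypothetical demultiplexing parent flow handler, not part of
 * this file): the parent resolves pending children and re-enters the
 * generic layer, which then runs each child's installed flow handler,
 * e.g. handle_simple_irq. "my_pending_reg()" and "my_child_irq_base"
 * are illustrative only.
 *
 *	static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long pending = my_pending_reg();
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(my_child_irq_base + bit);
 *	}
 */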

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
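/*
 * Example (hypothetical platform code, not part of this file): a sketch
 * of wiring a level-triggered line to this flow handler; the chip must
 * provide mask/ack callbacks for mask_ack_irq(). "gpio_chip_level" is
 * illustrative only.
 *
 *	irq_set_chip_and_handler_name(irq, &gpio_chip_level,
 *				      handle_level_irq, "level");
 */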

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
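/*
 * Example (hypothetical EOI-based controller setup, not part of this
 * file): a transparent controller mainly needs ->irq_eoi() wired up for
 * this flow. "my_eoi_chip" and the my_* callbacks are illustrative only.
 *
 *	static struct irq_chip my_eoi_chip = {
 *		.name		= "MYEOI",
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *		.irq_eoi	= my_eoi,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &my_eoi_chip, handle_fasteoi_irq);
 */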

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupts occur on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires re-enabling the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available then just get
	 * out of here; the interrupt is eoi'ed at out_eoi.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
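/*
 * Example (hypothetical per-CPU timer driver, not part of this file):
 * per-CPU interrupts are requested with a percpu dev_id, which this
 * handler dereferences for the local CPU. "my_evt" and
 * "my_timer_handler" are illustrative only.
 *
 *	static DEFINE_PER_CPU(struct clock_event_device, my_evt);
 *
 *	err = request_percpu_irq(irq, my_timer_handler, "my_timer",
 *				 &my_evt);
 */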

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there.  Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				goto out;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
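/*
 * Example (hypothetical cascaded GPIO controller, not part of this
 * file): a sketch of installing a chained flow handler on the parent
 * line; chained handlers are started immediately and excluded from
 * request_irq(). "my_gpio_demux" and "my_gpio_ctrl" are illustrative.
 *
 *	irq_set_chained_handler(parent_irq, my_gpio_demux);
 *	irq_set_handler_data(parent_irq, my_gpio_ctrl);
 */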

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
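/*
 * Example (hypothetical per-CPU IPI setup, not part of this file): a
 * sketch clearing the request/probe restrictions while marking the
 * line per-CPU.
 *
 *	irq_modify_status(irq, IRQ_NOREQUEST | IRQ_NOPROBE, IRQ_PER_CPU);
 */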

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
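/*
 * Example (hypothetical stacked-domain chip, not part of this file): a
 * child chip in a hierarchy can delegate most operations straight to
 * its parent via the helpers above. "my_msi_chip" is illustrative only.
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "MYMSI",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */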

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}