/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
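
/*
 * Example (illustrative): a driver configuring its interrupt line for
 * rising-edge triggering before requesting it:
 *
 *	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 */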

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
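
/*
 * Example (illustrative; the "foo" names are hypothetical): a driver can
 * attach per-controller state to an irq and fetch it back from its chip
 * callbacks:
 *
 *	irq_set_chip_data(irq, foo);
 *
 *	static void foo_mask(struct irq_data *d)
 *	{
 *		struct foo_priv *foo = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), foo->base + FOO_MASK_REG);
 *	}
 */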

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	irq_domain_activate_irq(&desc->irq_data);
	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_domain_deactivate_irq(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	} else if (irq_settings_disable_unlazy(desc)) {
		mask_irq(desc);
	}
}
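
/*
 * Example (illustrative): a driver whose device cannot silence the
 * interrupt at the device level can opt out of the lazy approach before
 * relying on disable_irq():
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 */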

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
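
/*
 * Example (illustrative; the "foo" names are hypothetical): the threaded
 * handler of an I2C-connected irq chip demultiplexing its nested child
 * interrupts:
 *
 *	static irqreturn_t foo_irq_thread(int irq, void *data)
 *	{
 *		struct foo_priv *foo = data;
 *		unsigned long pending = foo_read_pending(foo);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, FOO_NR_IRQS)
 *			handle_nested_irq(irq_find_mapping(foo->domain, bit));
 *
 *		return IRQ_HANDLED;
 *	}
 */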

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
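
/*
 * Example (illustrative): child interrupts behind a software demultiplexer
 * are commonly wired to this flow handler from a domain .map() callback:
 *
 *	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
 */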

/**
 *	handle_untracked_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Untracked interrupts are sent from a demultiplexing interrupt
 *	handler when the demultiplexer does not know which device in its
 *	multiplexed irq domain generated the interrupt. IRQs handled
 *	through here are not subjected to stats tracking, randomness, or
 *	spurious interrupt detection.
 *
 *	Note: Like handle_simple_irq, the caller is expected to handle
 *	the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require masking the interrupt and
 *	unmasking it after the associated handler has acknowledged the
 *	device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
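
/*
 * Example (illustrative; "foo_eoi_chip" is hypothetical): controllers that
 * manage the flow in hardware (the ARM GIC is one such case) pair this
 * handler with a chip whose ->irq_eoi() signals end of interrupt:
 *
 *	irq_set_chip_and_handler(virq, &foo_eoi_chip, handle_fasteoi_irq);
 */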

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires reenabling the interrupt inside
 *	the loop which handles the interrupts that arrived while the
 *	handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
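
/*
 * Example (illustrative; the "foo" names are hypothetical): per-CPU
 * interrupts such as local timers are marked percpu-devid and requested
 * with a __percpu dev_id:
 *
 *	irq_set_percpu_devid(irq);
 *	irq_set_chip_and_handler(irq, &foo_chip, handle_percpu_devid_irq);
 *	err = request_percpu_irq(irq, foo_timer_handler, "foo_timer",
 *				 foo->pcpu_data);
 */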

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there.  Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, true);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, 1, NULL);
	desc->irq_common_data.handler_data = data;

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
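
/*
 * Example (illustrative; the "foo" names are hypothetical): a GPIO driver
 * installing a chained flow handler on its upstream parent interrupt:
 *
 *	static void foo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct foo_priv *foo = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = foo_read_pending(foo);
 *		for_each_set_bit(bit, &pending, FOO_NR_IRQS)
 *			generic_handle_irq(irq_find_mapping(foo->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, foo_demux_handler, foo);
 */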

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
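
/*
 * Example (illustrative; "foo_chip" is hypothetical): wiring up a chip and
 * a level flow handler for a freshly mapped irq:
 *
 *	irq_set_chip_and_handler_name(irq, &foo_chip, handle_level_irq,
 *				      "foo-level");
 */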

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
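
/*
 * Example (illustrative): marking an irq as unsuitable for autoprobing and
 * affinity balancing, without clearing any other status bits:
 *
 *	irq_modify_status(irq, 0, IRQ_NOPROBE | IRQ_NO_BALANCING);
 */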

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
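
/*
 * Example (illustrative; "foo_msi_chip" is hypothetical): a chip in a
 * hierarchical domain can forward most operations straight to its parent:
 *
 *	static struct irq_chip foo_msi_chip = {
 *		.name			= "foo-MSI",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_ack		= irq_chip_ack_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */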

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose an msi message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non
 * hierarchical we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this
 * function has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}