/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
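
/*
 * Example (illustrative only, not part of this file): a board setup or
 * irqchip driver would typically pair irq_set_chip() with a flow handler.
 * "my_chip", my_mask(), my_unmask(), my_ack() and MY_IRQ are hypothetical
 * names.
 *
 *	static struct irq_chip my_chip = {
 *		.name		= "MYCHIP",
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *		.irq_ack	= my_ack,
 *	};
 *
 *	irq_set_chip(MY_IRQ, &my_chip);
 *	irq_set_handler(MY_IRQ, handle_level_irq);
 */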

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
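
/*
 * Example (illustrative only): select the trigger mode before the irq is
 * used, e.g. for a rising-edge line; MY_IRQ is a hypothetical number.
 *
 *	int ret = irq_set_irq_type(MY_IRQ, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		pr_err("cannot set trigger type for irq %u\n", MY_IRQ);
 */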

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
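
/*
 * Example (illustrative only): a chip driver usually stashes its private
 * state here and retrieves it in the irq_chip callbacks. "my_gpio",
 * my_mask() and MY_IRQ_MASK_REG are hypothetical.
 *
 *	irq_set_chip_data(irq, my_gpio);
 *
 *	static void my_mask(struct irq_data *d)
 *	{
 *		struct my_gpio_chip *gc = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), gc->base + MY_IRQ_MASK_REG);
 *	}
 */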
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either. Emit a warning, break the affinity
		 * and start it up as a normal interrupt.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_NORMAL;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		irqd_set_managed_shutdown(d);
		return IRQ_STARTUP_ABORT;
	}
	return IRQ_STARTUP_MANAGED;
}
#else
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	irq_domain_activate_irq(d);
	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			ret = __irq_startup(desc);
			irq_set_affinity_locked(d, aff, false);
			break;
		case IRQ_STARTUP_ABORT:
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	} else if (mask) {
		mask_irq(desc);
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
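
/*
 * Example (illustrative only): a driver that cannot tolerate the lazy
 * approach can opt out per irq line before requesting it; MY_IRQ,
 * my_handler and mydev are hypothetical.
 *
 *	irq_set_status_flags(MY_IRQ, IRQ_DISABLE_UNLAZY);
 *	ret = request_irq(MY_IRQ, my_handler, 0, "mydev", mydev);
 */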

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}
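
/*
 * Example (illustrative only): a controller whose hardware can mask and
 * acknowledge with a single register write can provide irq_mask_ack and
 * save mask_ack_irq() the second access. my_mask_ack() and
 * MY_MASK_ACK_REG are hypothetical.
 *
 *	static void my_mask_ack(struct irq_data *d)
 *	{
 *		struct my_chip_priv *p = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), p->base + MY_MASK_ACK_REG);
 *	}
 *
 *	static struct irq_chip my_chip = {
 *		.name		= "MYCHIP",
 *		.irq_mask_ack	= my_mask_ack,
 *	};
 */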

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
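
/*
 * Example (illustrative only): an I2C GPIO expander marks its child
 * interrupts as nested and dispatches them from its own threaded handler;
 * all names below are hypothetical.
 *
 *	irq_set_chip_and_handler(child_irq, &my_chip, handle_simple_irq);
 *	irq_set_nested_thread(child_irq, true);
 *
 *	static irqreturn_t my_expander_thread(int irq, void *dev_id)
 *	{
 *		struct my_expander *ex = dev_id;
 *		unsigned long pending = my_read_status(ex);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, ex->ngpio)
 *			handle_nested_irq(irq_find_mapping(ex->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 */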

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
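
/*
 * Example (illustrative only): handle_simple_irq() suits demultiplexed
 * child interrupts where the parent handler does all hardware flow
 * control; my_demux_handler() and child_irq are hypothetical.
 *
 *	irq_set_chip_and_handler(child_irq, &dummy_irq_chip, handle_simple_irq);
 *
 *	static void my_demux_handler(struct irq_desc *desc)
 *	{
 *		(decode the pending child from the mux hardware, then:)
 *		generic_handle_irq(child_irq);
 *	}
 */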
/**
 *	handle_untracked_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Untracked interrupts are sent from a demultiplexing interrupt
 *	handler when the demultiplexer does not know which device in its
 *	multiplexed irq domain generated the interrupt. IRQs handled
 *	through here are not subjected to stats tracking, randomness, or
 *	spurious interrupt detection.
 *
 *	Note: Like handle_simple_irq, the caller is expected to handle
 *	the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require masking the interrupt and
 *	unmasking it after the associated handler has acknowledged the
 *	device, so that the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	The interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be re-enabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires re-enabling the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available then mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there.  Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
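
/*
 * Example (illustrative only): a GPIO block typically hooks its parent
 * line here and demultiplexes in the chained flow handler. Because the
 * core takes the parent irq over, the driver must not also request_irq()
 * it. "my_gpio_demux", "my_gc" and MY_PENDING_REG are hypothetical.
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_gpio_demux, my_gc);
 *
 *	static void my_gpio_demux(struct irq_desc *desc)
 *	{
 *		struct my_gpio_chip *gc = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(gc->base + MY_PENDING_REG);
 *		for_each_set_bit(bit, &pending, gc->ngpio)
 *			generic_handle_irq(irq_find_mapping(gc->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 */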

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
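
/*
 * Example (illustrative only): irq_set_status_flags() and
 * irq_clear_status_flags() are thin wrappers around irq_modify_status().
 * A driver that wants to arm an irq explicitly rather than on
 * request_irq() can do (MY_IRQ, my_handler and mydev are hypothetical):
 *
 *	irq_set_status_flags(MY_IRQ, IRQ_NOAUTOEN);
 *	ret = request_irq(MY_IRQ, my_handler, 0, "mydev", mydev);
 *	...
 *	enable_irq(MY_IRQ);
 */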

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
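
/*
 * Example (illustrative only): a chip in a hierarchical domain commonly
 * forwards the basic operations straight to its parent; "my_msi_chip" is
 * a hypothetical name.
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "MY-MSI",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */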

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this
 * function has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}
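
/*
 * Example (illustrative only): the genirq core pairs these two calls
 * around the request and free of an interrupt, so a chip driver usually
 * only has to point ->parent_device at the device that powers the block;
 * "my_chip" and "pdev" are hypothetical.
 *
 *	my_chip.parent_device = &pdev->dev;
 */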