/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
14
#include <linux/msi.h>
15 16 17 18 19 20 21
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
T
Thomas Gleixner 已提交
22
 *	irq_set_chip - set the irq chip for an irq
23 24 25
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
T
Thomas Gleixner 已提交
26
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
27 28
{
	unsigned long flags;
29
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
30

31
	if (!desc)
32 33 34 35 36
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

37
	desc->irq_data.chip = chip;
38
	irq_put_desc_unlock(desc, flags);
39 40 41 42 43 44
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
45 46
	return 0;
}
T
Thomas Gleixner 已提交
47
EXPORT_SYMBOL(irq_set_chip);
48 49

/**
T
Thomas Gleixner 已提交
50
 *	irq_set_type - set the irq trigger type for an irq
51
 *	@irq:	irq number
D
David Brownell 已提交
52
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
53
 */
T
Thomas Gleixner 已提交
54
int irq_set_irq_type(unsigned int irq, unsigned int type)
55 56
{
	unsigned long flags;
57 58
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
	int ret = 0;
59

60 61
	if (!desc)
		return -EINVAL;
62

63
	type &= IRQ_TYPE_SENSE_MASK;
64 65 66
	if (type != IRQ_TYPE_NONE)
		ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
67 68
	return ret;
}
T
Thomas Gleixner 已提交
69
EXPORT_SYMBOL(irq_set_irq_type);
70 71

/**
T
Thomas Gleixner 已提交
72
 *	irq_set_handler_data - set irq handler data for an irq
73 74 75 76 77
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
T
Thomas Gleixner 已提交
78
int irq_set_handler_data(unsigned int irq, void *data)
79 80
{
	unsigned long flags;
81
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
82

83
	if (!desc)
84
		return -EINVAL;
85
	desc->irq_data.handler_data = data;
86
	irq_put_desc_unlock(desc, flags);
87 88
	return 0;
}
T
Thomas Gleixner 已提交
89
EXPORT_SYMBOL(irq_set_handler_data);
90

91
/**
T
Thomas Gleixner 已提交
92
 *	irq_set_msi_desc - set MSI descriptor data for an irq
93
 *	@irq:	Interrupt number
R
Randy Dunlap 已提交
94
 *	@entry:	Pointer to MSI descriptor data
95
 *
L
Liuweni 已提交
96
 *	Set the MSI descriptor entry for an irq
97
 */
T
Thomas Gleixner 已提交
98
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
99 100
{
	unsigned long flags;
101
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
102

103
	if (!desc)
104
		return -EINVAL;
105
	desc->irq_data.msi_desc = entry;
106 107
	if (entry)
		entry->irq = irq;
108
	irq_put_desc_unlock(desc, flags);
109 110 111
	return 0;
}

112
/**
T
Thomas Gleixner 已提交
113
 *	irq_set_chip_data - set irq chip data for an irq
114 115 116 117 118
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
T
Thomas Gleixner 已提交
119
int irq_set_chip_data(unsigned int irq, void *data)
120 121
{
	unsigned long flags;
122
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
123

124
	if (!desc)
125
		return -EINVAL;
126
	desc->irq_data.chip_data = data;
127
	irq_put_desc_unlock(desc, flags);
128 129
	return 0;
}
T
Thomas Gleixner 已提交
130
EXPORT_SYMBOL(irq_set_chip_data);
131

132 133 134 135 136 137 138 139
/* Return the irq_data embedded in the descriptor of @irq, or NULL. */
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return NULL;

	return &desc->irq_data;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

140 141
/* Track the interrupt as enabled in the descriptor state */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

/* Track the interrupt as disabled in the descriptor state */
static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

150 151
/* Track the interrupt line as unmasked in the descriptor state */
static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

/* Track the interrupt line as masked in the descriptor state */
static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

160 161
/*
 * Start up an interrupt: clear the disabled state, reset the disable
 * depth, and use the chip's ->irq_startup() when it has one, otherwise
 * fall back to a plain irq_enable(). Returns the chip's startup result
 * (or 0).
 */
int irq_startup(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		/* The chip started the line, so track it as unmasked */
		irq_state_clr_masked(desc);
		return ret;
	}

	irq_enable(desc);
	return 0;
}

/*
 * Shut down an interrupt: mark it disabled with depth 1 and tell the
 * chip to stop it, preferring ->irq_shutdown(), then ->irq_disable(),
 * then ->irq_mask() as fallbacks.
 */
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	/*
	 * Invoke exactly one chip callback. The previous code fell
	 * through from ->irq_shutdown() into the disable/mask pair,
	 * masking the line a second time for chips that provide
	 * ->irq_shutdown() but no ->irq_disable(), and dereferenced
	 * ->irq_mask() unconditionally even though it may be NULL
	 * (mask_irq() guards against that).
	 */
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else if (desc->irq_data.chip->irq_mask)
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

188 189
/*
 * Enable the interrupt at the chip level and clear the disabled and
 * masked state bits. Uses ->irq_enable() when the chip provides it,
 * otherwise ->irq_unmask().
 *
 * NOTE(review): ->irq_unmask is dereferenced unconditionally here;
 * this assumes every chip lacking ->irq_enable provides ->irq_unmask —
 * confirm against the chip implementations.
 */
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

T
Thomas Gleixner 已提交
198
/*
 * Disable the interrupt. When the chip has no ->irq_disable() callback
 * only the descriptor state is marked disabled and the hardware line is
 * left unmasked — presumably masked lazily if another interrupt
 * arrives; TODO confirm against the flow handlers' IRQS_PENDING
 * handling.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

207
/*
 * Mask and acknowledge the interrupt in one go: use the combined
 * ->irq_mask_ack() when available, otherwise ->irq_mask() followed by
 * ->irq_ack() (if any). The descriptor is tracked as masked either way.
 *
 * NOTE(review): ->irq_mask is assumed non-NULL on the fallback path,
 * unlike in mask_irq() — confirm callers only reach this with chips
 * that provide it.
 */
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

219
void mask_irq(struct irq_desc *desc)
220
{
221 222
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
223
		irq_state_set_masked(desc);
224 225 226
	}
}

227
void unmask_irq(struct irq_desc *desc)
228
{
229 230
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
231
		irq_state_clr_masked(desc);
232
	}
233 234
}

235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250
/*
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	/* Runs in thread context, so sleeping is legitimate here */
	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	/* Drop the lock: the thread handler may sleep */
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

274 275
static bool irq_check_poll(struct irq_desc *desc)
{
276
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
277 278 279 280
		return false;
	return irq_wait_for_poll(desc);
}

281 282 283 284 285 286 287 288 289 290 291 292
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* If a poller already runs this irq, let it finish the work */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/* No action installed or irq disabled: nothing to do */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask+ack first: the level stays asserted until the device is serviced */
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	/* Unmask again unless disabled meanwhile or a oneshot threaded irq */
	if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
		unmask_irq(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
352

353 354 355 356 357 358 359 360 361 362
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the architecture supplied preflow handler, if one is installed */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

363
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	/* Oneshot threaded irqs stay masked until the thread is done */
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	/* Handled path falls through: always issue the eoi */
out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
	/*
	 * Unhandled path: chips flagged IRQCHIP_EOI_IF_HANDLED skip the
	 * eoi when nothing was handled.
	 */
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}

478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

517
out_eoi:
518 519 520 521 522
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

523
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	/* ack and eoi are both optional for these chips */
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/*
 * Install a flow handler for @irq. A NULL @handle uninstalls (reverts
 * to handle_bad_irq, masking the line and marking it disabled); a
 * non-NULL handler is refused while the descriptor still carries
 * no_irq_chip. Chained handlers are marked noprobe/norequest/nothread
 * and started immediately.
 */
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		/* A real handler needs a real chip installed first */
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
583 584

/*
 * Install @chip and the flow handler @handle (with descriptive @name)
 * for @irq in one call. The chip must be set first, because
 * __irq_set_handler() refuses a real handler on a descriptor that
 * still uses no_irq_chip.
 */
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
R
Ralf Baechle 已提交
591

T
Thomas Gleixner 已提交
592
/*
 * Clear the bits in @clr and set the bits in @set in the irq settings
 * of @irq, then resync the derived irq_data state bits (balancing,
 * per-cpu, move-in-process-context, level and trigger mask) from the
 * updated settings.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	/* Wipe the derived state bits, then rebuild them from the settings */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_chip *chip;
		unsigned long flags;
		bool notify;

		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/* Skip disabled irqs for chips that only want enabled ones */
		notify = chip && chip->irq_cpu_online &&
			 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
			  !irqd_irq_disabled(&desc->irq_data));
		if (notify)
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_chip *chip;
		unsigned long flags;
		bool notify;

		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/* Skip disabled irqs for chips that only want enabled ones */
		notify = chip && chip->irq_cpu_offline &&
			 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
			  !irqd_irq_disabled(&desc->irq_data));
		if (notify)
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}