chip.c 16.4 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
14
#include <linux/msi.h>
15 16 17 18 19 20 21
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 *
 *	Returns 0 on success or -EINVAL when no descriptor exists for @irq.
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;

	/* NULL means "no controller": install the dummy chip instead */
	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
48 49

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 *	Returns 0 on success, -EINVAL when no descriptor exists for @irq,
 *	or the return value of __irq_set_trigger() otherwise.
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Strip everything but the sense bits; NONE leaves the type as-is */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE)
		ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
70 71

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 *
 *	Returns 0 on success or -EINVAL when no descriptor exists for @irq.
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
90

91
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 *
 *	Returns 0 on success or -EINVAL when no descriptor exists for @irq.
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	/* Keep the back-pointer from the MSI entry to the irq in sync */
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

112
/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 *
 *	Returns 0 on success or -EINVAL when no descriptor exists for @irq.
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
131

132 133 134 135 136 137 138 139
/**
 *	irq_get_irq_data - get irq_data for an irq
 *	@irq:	Interrupt number
 *
 *	Returns the irq_data embedded in the descriptor for @irq, or
 *	NULL when no descriptor exists.
 */
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return NULL;

	return &desc->irq_data;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

140 141
/* Clear the IRQD_IRQ_DISABLED state bit for this descriptor */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

/* Set the IRQD_IRQ_DISABLED state bit for this descriptor */
static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

150 151
/* Clear the IRQD_IRQ_MASKED state bit for this descriptor */
static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

/* Set the IRQD_IRQ_MASKED state bit for this descriptor */
static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

160 161
/*
 * Start up an interrupt: mark it enabled at depth 0, then either call
 * the chip's irq_startup() callback or fall back to a plain enable.
 * Returns the chip callback's return value, or 0.
 */
int irq_startup(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		/* irq_startup() also unmasked the line; reflect that in state */
		irq_state_clr_masked(desc);
		return ret;
	}

	irq_enable(desc);
	return 0;
}

/*
 * Shut down an interrupt: mark it disabled at depth 1 and invoke the
 * strongest callback the chip provides (shutdown > disable > mask).
 */
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

188 189
/*
 * Enable an interrupt via the chip's irq_enable() callback, falling
 * back to irq_unmask() when no enable callback exists.
 */
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

T
Thomas Gleixner 已提交
198
/*
 * Disable an interrupt. Note the asymmetry with irq_enable(): when the
 * chip has no irq_disable() callback only the DISABLED state bit is set
 * and the hardware is left untouched (lazy disable) - the MASKED state
 * is only recorded when the chip was actually told to disable.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

207
static inline void mask_ack_irq(struct irq_desc *desc)
208
{
209 210
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
211
	else {
212
		desc->irq_data.chip->irq_mask(&desc->irq_data);
213 214
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
215
	}
216
	irq_state_set_masked(desc);
217 218
}

219
void mask_irq(struct irq_desc *desc)
220
{
221 222
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
223
		irq_state_set_masked(desc);
224 225 226
	}
}

227
void unmask_irq(struct irq_desc *desc)
228
{
229 230
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
231
		irq_state_clr_masked(desc);
232
	}
233 234
}

235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250
/*
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	/* Runs in thread context; the thread_fn below may sleep */
	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	/*
	 * Mark the irq in progress and drop the lock while the thread
	 * handler runs; INPROGRESS keeps other contexts from handling
	 * the same irq concurrently.
	 */
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

274 275
static bool irq_check_poll(struct irq_desc *desc)
{
276
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
277 278 279 280
		return false;
	return irq_wait_for_poll(desc);
}

281 282 283 284 285 286 287 288 289 290 291 292
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* Already being handled? Let the poller finish, else bail out */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/* No action installed or irq disabled: nothing to run */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
314 315 316 317 318 319 320 321 322 323 324

/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask first: a level irq stays asserted until the device is handled */
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	/* Oneshot irqs stay masked until the threaded handler finalizes them */
	if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
		unmask_irq(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
353

354 355 356 357 358 359 360 361 362 363
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Run the architecture-installed preflow handler, if one is set */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

364
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	/* Oneshot: keep masked until the threaded handler is done */
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	/* Normal path falls through: eoi the chip, then unlock */
out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	/*
	 * Unhandled exit: chips with IRQCHIP_EOI_IF_HANDLED only want
	 * the eoi when the irq was actually handled, so skip it then.
	 */
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		/* Action may have been removed while we serviced the irq */
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}

479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

518
out_eoi:
519 520 521 522 523
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

524
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 *	(note: desc->lock is never taken here).
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/*
 * Install a flow handler for @irq. A NULL @handle uninstalls (installs
 * handle_bad_irq); @is_chained marks the irq as a chained demux irq,
 * which is excluded from probing/requesting/threading and started up
 * immediately.
 */
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		/* Installing a real handler on a chipless irq is a bug */
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
584 585

/**
 *	irq_set_chip_and_handler_name - set the chip and flow handler for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 *	@handle:	flow handler to be installed
 *	@name:	name shown for the flow handler
 */
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
R
Ralf Baechle 已提交
592

T
Thomas Gleixner 已提交
593
/**
 *	irq_modify_status - modify the status flags of an irq
 *	@irq:	irq number
 *	@clr:	flags to clear
 *	@set:	flags to set
 *
 *	Clears @clr and sets @set in the irq settings, then rebuilds the
 *	derived IRQD_* bits in irq_data from the resulting settings.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	/* Drop all derived bits, then re-derive them from the settings */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * Chips flagged IRQCHIP_ONOFFLINE_ENABLED only get the
		 * callback while the interrupt is not disabled.
		 */
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * Chips flagged IRQCHIP_ONOFFLINE_ENABLED only get the
		 * callback while the interrupt is not disabled.
		 */
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}