chip.c 18.6 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
14
#include <linux/msi.h>
15 16 17 18 19 20 21
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 *
 *	Returns 0 on success, -EINVAL if @irq has no descriptor.
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	/* A NULL chip means "no controller": install the dummy chip */
	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
48 49

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 *	Returns 0 on success or the result of __irq_set_trigger(),
 *	-EINVAL if @irq has no descriptor.
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Only the sense bits are relevant; IRQ_TYPE_NONE is a no-op */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE)
		ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
70 71

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 *
 *	Returns 0 on success, -EINVAL if @irq has no descriptor.
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
90

91
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 *
 *	Returns 0 on success, -EINVAL if @irq has no descriptor.
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	/* Keep the back pointer from the MSI entry to the irq in sync */
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

112
/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 *
 *	Returns 0 on success, -EINVAL if @irq has no descriptor.
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
131

132 133 134 135 136 137 138 139
/* Return the irq_data for @irq, or NULL when no descriptor exists. */
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return NULL;

	return &desc->irq_data;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

140 141
/* Helpers to flip the IRQD_IRQ_DISABLED / IRQD_IRQ_MASKED state bits */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

160
/*
 * Start up an interrupt: clear the disabled state and use the chip's
 * irq_startup() callback when available, otherwise plain irq_enable().
 * When @resend is true, replay a possibly pending interrupt afterwards.
 * Returns the chip's irq_startup() result, or 0.
 */
int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		/* irq_startup() implies unmask */
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

/*
 * Shut down an interrupt: mark it disabled and invoke the best
 * available chip callback (irq_shutdown > irq_disable > irq_mask).
 */
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

191 192
/*
 * Enable an interrupt via the chip's irq_enable() callback, falling
 * back to irq_unmask() when the chip provides none.
 */
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

T
Thomas Gleixner 已提交
201
/*
 * Disable an interrupt. If the chip has no irq_disable() callback the
 * interrupt stays unmasked in hardware (lazy disable): the DISABLED
 * state bit is set and the flow handlers keep spurious occurrences
 * from reaching the action handlers.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

228
/*
 * Mask and acknowledge an interrupt, preferring the combined
 * irq_mask_ack() callback when the chip provides one.
 */
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

240
/* Mask an interrupt; a no-op when the chip has no irq_mask() callback. */
void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

248
/* Unmask an interrupt; a no-op when the chip has no irq_unmask() callback. */
void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271
/*
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	/*
	 * Drop the lock while running the thread function; INPROGRESS
	 * keeps concurrent invocations away in the meantime.
	 */
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

295 296
static bool irq_check_poll(struct irq_desc *desc)
{
297
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
298 299 300 301
		return false;
	return irq_wait_for_poll(desc);
}

302 303 304 305 306 307 308 309 310 311 312 313
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
335

336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

354 355 356 357 358 359 360 361 362 363
/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask + ack first so the line stops asserting while we handle it */
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
392

393 394 395 396 397 398 399 400 401 402
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the architecture-installed preflow handler, if any */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

403
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	/*
	 * Unhandled exit path: chips with IRQCHIP_EOI_IF_HANDLED only
	 * want ->eoi() after the irq was actually handled.
	 */
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occures on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Renable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}

521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

560
out_eoi:
561 562 563 564 565
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

566
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	/* Resolve the per-cpu device id for the current CPU */
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

621
/*
 * Install @handle as flow handler for @irq. A NULL @handle uninstalls
 * the current handler (replaced by handle_bad_irq). @is_chained marks
 * the irq as a demultiplexing parent: it is excluded from probing,
 * requesting and threading, and is started up immediately.
 */
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		/* Installing a real handler requires a real chip */
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
658 659

/* Convenience wrapper: set chip, flow handler and name in one call */
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
R
Ralf Baechle 已提交
666

T
Thomas Gleixner 已提交
667
/*
 * Clear the @clr bits and set the @set bits in the irq settings of
 * @irq, then rebuild the derived irqd state bits (balancing, per-cpu,
 * trigger mask, level, move-in-process-context) from the new settings.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	/* Drop all derived state bits, then re-set them from settings */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * Skip disabled irqs unless the chip opted in via
		 * IRQCHIP_ONOFFLINE_ENABLED.
		 */
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * Skip disabled irqs unless the chip opted in via
		 * IRQCHIP_ONOFFLINE_ENABLED.
		 */
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}