chip.c 18.0 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
14
#include <linux/msi.h>
15 16 17 18 19 20 21
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
T
Thomas Gleixner 已提交
22
 *	irq_set_chip - set the irq chip for an irq
23 24 25
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
T
Thomas Gleixner 已提交
26
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
27 28
{
	unsigned long flags;
29
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
30

31
	if (!desc)
32 33 34 35 36
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

37
	desc->irq_data.chip = chip;
38
	irq_put_desc_unlock(desc, flags);
39 40 41 42 43 44
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
45 46
	return 0;
}
T
Thomas Gleixner 已提交
47
EXPORT_SYMBOL(irq_set_chip);
48 49

/**
T
Thomas Gleixner 已提交
50
 *	irq_set_type - set the irq trigger type for an irq
51
 *	@irq:	irq number
D
David Brownell 已提交
52
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
53
 */
T
Thomas Gleixner 已提交
54
int irq_set_irq_type(unsigned int irq, unsigned int type)
55 56
{
	unsigned long flags;
57
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
58
	int ret = 0;
59

60 61
	if (!desc)
		return -EINVAL;
62

63
	type &= IRQ_TYPE_SENSE_MASK;
64 65 66
	if (type != IRQ_TYPE_NONE)
		ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
67 68
	return ret;
}
T
Thomas Gleixner 已提交
69
EXPORT_SYMBOL(irq_set_irq_type);
70 71

/**
T
Thomas Gleixner 已提交
72
 *	irq_set_handler_data - set irq handler data for an irq
73 74 75 76 77
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
T
Thomas Gleixner 已提交
78
int irq_set_handler_data(unsigned int irq, void *data)
79 80
{
	unsigned long flags;
81
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
82

83
	if (!desc)
84
		return -EINVAL;
85
	desc->irq_data.handler_data = data;
86
	irq_put_desc_unlock(desc, flags);
87 88
	return 0;
}
T
Thomas Gleixner 已提交
89
EXPORT_SYMBOL(irq_set_handler_data);
90

91
/**
T
Thomas Gleixner 已提交
92
 *	irq_set_msi_desc - set MSI descriptor data for an irq
93
 *	@irq:	Interrupt number
R
Randy Dunlap 已提交
94
 *	@entry:	Pointer to MSI descriptor data
95
 *
L
Liuweni 已提交
96
 *	Set the MSI descriptor entry for an irq
97
 */
T
Thomas Gleixner 已提交
98
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
99 100
{
	unsigned long flags;
101
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
102

103
	if (!desc)
104
		return -EINVAL;
105
	desc->irq_data.msi_desc = entry;
106 107
	if (entry)
		entry->irq = irq;
108
	irq_put_desc_unlock(desc, flags);
109 110 111
	return 0;
}

112
/**
T
Thomas Gleixner 已提交
113
 *	irq_set_chip_data - set irq chip data for an irq
114 115 116 117 118
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
T
Thomas Gleixner 已提交
119
int irq_set_chip_data(unsigned int irq, void *data)
120 121
{
	unsigned long flags;
122
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
123

124
	if (!desc)
125
		return -EINVAL;
126
	desc->irq_data.chip_data = data;
127
	irq_put_desc_unlock(desc, flags);
128 129
	return 0;
}
T
Thomas Gleixner 已提交
130
EXPORT_SYMBOL(irq_set_chip_data);
131

132 133 134 135 136 137 138 139
/* Return the irq_data embedded in @irq's descriptor, or NULL if none. */
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return NULL;
	return &desc->irq_data;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

140 141
/* Clear the IRQD_IRQ_DISABLED bit in the descriptor's irq_data state. */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

/* Set the IRQD_IRQ_DISABLED bit in the descriptor's irq_data state. */
static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

150 151
/* Clear the IRQD_IRQ_MASKED bit in the descriptor's irq_data state. */
static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

/* Set the IRQD_IRQ_MASKED bit in the descriptor's irq_data state. */
static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

160 161
/*
 * Mark the interrupt enabled (depth 0) and start it up, preferring the
 * chip's irq_startup() callback over a plain irq_enable().
 */
int irq_startup(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	/* depth 0: no outstanding disable_irq() calls */
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		/* The chip callback brought the line up, so it is unmasked */
		irq_state_clr_masked(desc);
		return ret;
	}

	irq_enable(desc);
	return 0;
}

/*
 * Mark the interrupt disabled and shut it down at the chip, using the
 * most specific callback the chip implements: irq_shutdown(), then
 * irq_disable(), then irq_mask() as the last resort.
 */
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	/* depth 1: one disable outstanding, i.e. fully disabled */
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

188 189
/*
 * Clear the disabled state and unmask the line at the chip, preferring
 * the chip's irq_enable() callback over plain irq_unmask().
 */
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

T
Thomas Gleixner 已提交
198
/*
 * Mark the interrupt disabled. The hardware is only masked when the
 * chip implements irq_disable(); otherwise the state stays unmasked
 * here — NOTE(review): this looks like genirq's lazy-disable scheme
 * (mask on next interrupt), confirm against the genirq documentation.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224
/* Enable a per-CPU irq on @cpu and record it in percpu_enabled. */
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

/* Disable a per-CPU irq on @cpu and clear it from percpu_enabled. */
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

225
/*
 * Mask and acknowledge the interrupt. Uses the chip's combined
 * irq_mask_ack() when available, otherwise irq_mask() followed by
 * an optional irq_ack().
 */
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

237
/* Mask the interrupt at the chip; state is only updated if a mask callback exists. */
void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

245
/* Unmask the interrupt at the chip; state is only updated if an unmask callback exists. */
void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268
/*
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	/* We run in thread context and may block below */
	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	/*
	 * Mark the irq in progress and drop the lock: the thread
	 * handler runs without the descriptor lock held.
	 */
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	/* Retake the lock to clear the in-progress state */
	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

292 293
/*
 * If the spurious-irq poller is currently handling this interrupt,
 * defer to irq_wait_for_poll(); returns false when no poll is in
 * progress (the caller may handle the irq normally).
 */
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

299 300 301 302 303 304 305 306 307 308 309 310
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* If the spurious poller owns this irq, let it finish */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/* No action installed or irq disabled: nothing to do */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
332 333 334 335 336 337 338 339 340 341 342

/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask first: a level irq keeps firing until the device is serviced */
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	/* Unmask again unless disabled meanwhile or oneshot (threaded) mode */
	if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
		unmask_irq(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
371

372 373 374 375 376 377 378 379 380 381
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
/* Invoke the optional per-descriptor preflow callback, if one is set. */
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

382
/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	/* Oneshot (threaded) handling: keep masked until the thread is done */
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	/*
	 * Unhandled exit: still issue the eoi unless the chip asked for
	 * eoi only on handled interrupts (IRQCHIP_EOI_IF_HANDLED).
	 */
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		/* Action removed while looping: mask and leave */
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Renable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}

497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 * Similar as the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

536
out_eoi:
537 538 539 540 541
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

542
/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/* No desc->lock here: the irq is local to this CPU */
	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	/* Resolve the percpu dev_id for the CPU we are running on */
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

597
/*
 * Install (or uninstall, with a NULL/handle_bad_irq handler) the flow
 * handler for @irq. Chained handlers (is_chained) are marked noprobe,
 * norequest and nothread, and started up immediately.
 */
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		/* NULL handler means uninstall */
		handle = handle_bad_irq;
	} else {
		/* Refuse to install a real handler without a real chip */
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
634 635

/*
 * Convenience wrapper: install @chip and then the flow handler. The
 * chip must be set first, because __irq_set_handler() refuses to
 * install a handler while no_irq_chip is in place.
 */
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
R
Ralf Baechle 已提交
642

T
Thomas Gleixner 已提交
643
/*
 * Clear @clr and set @set in the descriptor's settings, then rebuild
 * the derived IRQD_* bits in irq_data from the new settings.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	/* Wipe all derived bits, then re-derive them from the settings */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * Chips with IRQCHIP_ONOFFLINE_ENABLED only get the
		 * callback while the irq is not disabled.
		 */
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		/*
		 * Chips with IRQCHIP_ONOFFLINE_ENABLED only get the
		 * callback while the irq is not disabled.
		 */
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}