chip.c 16.0 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
14
#include <linux/msi.h>
15 16 17 18 19 20 21
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 *
 *	Returns 0 on success or -EINVAL when no descriptor exists for @irq.
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;

	/* NULL means "no controller": install the dummy chip instead */
	if (!chip)
		chip = &no_irq_chip;

	/* Fill in compat callback wrappers before the chip goes live */
	irq_chip_set_defaults(chip);
	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 *	Returns 0 on success or a negative error code from the trigger
 *	setup, -EINVAL when no descriptor exists for @irq.
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Only the sense bits are relevant for the trigger setup */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE)
		ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
T
Thomas Gleixner 已提交
67
 *	irq_set_handler_data - set irq handler data for an irq
68 69 70 71 72
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
T
Thomas Gleixner 已提交
73
int irq_set_handler_data(unsigned int irq, void *data)
74 75
{
	unsigned long flags;
76
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
77

78
	if (!desc)
79
		return -EINVAL;
80
	desc->irq_data.handler_data = data;
81
	irq_put_desc_unlock(desc, flags);
82 83
	return 0;
}
T
Thomas Gleixner 已提交
84
EXPORT_SYMBOL(irq_set_handler_data);
85

86
/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	/* Keep the MSI entry's back-pointer to its irq number in sync */
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
T
Thomas Gleixner 已提交
108
 *	irq_set_chip_data - set irq chip data for an irq
109 110 111 112 113
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
T
Thomas Gleixner 已提交
114
int irq_set_chip_data(unsigned int irq, void *data)
115 116
{
	unsigned long flags;
117
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
118

119
	if (!desc)
120
		return -EINVAL;
121
	desc->irq_data.chip_data = data;
122
	irq_put_desc_unlock(desc, flags);
123 124
	return 0;
}
T
Thomas Gleixner 已提交
125
EXPORT_SYMBOL(irq_set_chip_data);
126

127 128 129 130 131 132 133 134
/*
 * irq_get_irq_data - return the irq_data embedded in the descriptor
 * @irq: interrupt number
 *
 * Returns NULL when no descriptor exists for @irq.
 */
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return NULL;

	return &desc->irq_data;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

/*
 * Helpers to flip the IRQS_DISABLED bit in desc->istate while keeping
 * the legacy (compat) status field in sync via irq_compat_*().
 */
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	desc->istate &= ~IRQS_DISABLED;
	irq_compat_clr_disabled(desc);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	desc->istate |= IRQS_DISABLED;
	irq_compat_set_disabled(desc);
}

/*
 * Same as the IRQS_DISABLED helpers above, but for the IRQS_MASKED bit.
 */
static void irq_state_clr_masked(struct irq_desc *desc)
{
	desc->istate &= ~IRQS_MASKED;
	irq_compat_clr_masked(desc);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	desc->istate |= IRQS_MASKED;
	irq_compat_set_masked(desc);
}

/*
 * irq_startup - start up an interrupt
 * @desc:	interrupt descriptor
 *
 * Clears the disabled state, resets the disable depth and brings the
 * line up via the chip's irq_startup() callback when available,
 * otherwise via irq_enable(). Returns the startup callback's value
 * or 0.
 */
int irq_startup(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		/* A successful startup leaves the line unmasked */
		irq_state_clr_masked(desc);
		return ret;
	}

	irq_enable(desc);
	return 0;
}

/*
 * irq_shutdown - shut down an interrupt
 * @desc:	interrupt descriptor
 *
 * Marks the interrupt disabled and masked, and takes the line down at
 * the chip level using the best available callback: irq_shutdown()
 * when the chip provides one, else irq_disable(), else irq_mask().
 */
void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	/*
	 * The callbacks form a mutually exclusive fallback chain;
	 * invoking both irq_shutdown() and irq_disable() would take
	 * the line down twice.
	 */
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

/*
 * irq_enable - enable an interrupt at the chip level
 * @desc:	interrupt descriptor
 *
 * Prefers the chip's irq_enable() callback and falls back to
 * irq_unmask(). Clears the disabled and masked state bits.
 */
void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

/*
 * irq_disable - disable an interrupt at the chip level
 * @desc:	interrupt descriptor
 *
 * NOTE(review): the masked state is recorded even when the chip has no
 * irq_disable() callback, i.e. when the line was not actually masked.
 * Mainline keeps irq_state_set_masked() inside the conditional so that
 * chips without irq_disable() are lazily disabled — confirm which
 * behavior is intended here.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
/*
 * Temporary migration helpers
 *
 * These wrappers bridge the deprecated irq-number based chip callbacks
 * (chip->mask(irq), chip->ack(irq), ...) to the new struct irq_data
 * based interface, so unconverted irq chip implementations keep
 * working. They are installed by irq_chip_set_defaults() below.
 */
static void compat_irq_mask(struct irq_data *data)
{
	data->chip->mask(data->irq);
}

static void compat_irq_unmask(struct irq_data *data)
{
	data->chip->unmask(data->irq);
}

static void compat_irq_ack(struct irq_data *data)
{
	data->chip->ack(data->irq);
}

static void compat_irq_mask_ack(struct irq_data *data)
{
	data->chip->mask_ack(data->irq);
}

static void compat_irq_eoi(struct irq_data *data)
{
	data->chip->eoi(data->irq);
}

static void compat_irq_enable(struct irq_data *data)
{
	data->chip->enable(data->irq);
}

static void compat_irq_disable(struct irq_data *data)
{
	data->chip->disable(data->irq);
}

static void compat_irq_shutdown(struct irq_data *data)
{
	data->chip->shutdown(data->irq);
}

static unsigned int compat_irq_startup(struct irq_data *data)
{
	return data->chip->startup(data->irq);
}

static int compat_irq_set_affinity(struct irq_data *data,
				   const struct cpumask *dest, bool force)
{
	/* The old-style callback has no "force" argument; it is dropped */
	return data->chip->set_affinity(data->irq, dest);
}

static int compat_irq_set_type(struct irq_data *data, unsigned int type)
{
	return data->chip->set_type(data->irq, type);
}

static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
{
	return data->chip->set_wake(data->irq, on);
}

static int compat_irq_retrigger(struct irq_data *data)
{
	return data->chip->retrigger(data->irq);
}

static void compat_bus_lock(struct irq_data *data)
{
	data->chip->bus_lock(data->irq);
}

static void compat_bus_sync_unlock(struct irq_data *data)
{
	data->chip->bus_sync_unlock(data->irq);
}
#endif

/*
 * Fixup enable/disable function pointers
 *
 * For every old-style callback the chip still provides, install the
 * matching compat_* wrapper in the corresponding irq_* slot, so the
 * core only ever has to invoke the irq_data based interface.
 */
void irq_chip_set_defaults(struct irq_chip *chip)
{
#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
	if (chip->enable)
		chip->irq_enable = compat_irq_enable;
	if (chip->disable)
		chip->irq_disable = compat_irq_disable;
	if (chip->shutdown)
		chip->irq_shutdown = compat_irq_shutdown;
	if (chip->startup)
		chip->irq_startup = compat_irq_startup;
	/* ->end gets a default so callers need not check for NULL */
	if (!chip->end)
		chip->end = dummy_irq_chip.end;
	if (chip->bus_lock)
		chip->irq_bus_lock = compat_bus_lock;
	if (chip->bus_sync_unlock)
		chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
	if (chip->mask)
		chip->irq_mask = compat_irq_mask;
	if (chip->unmask)
		chip->irq_unmask = compat_irq_unmask;
	if (chip->ack)
		chip->irq_ack = compat_irq_ack;
	if (chip->mask_ack)
		chip->irq_mask_ack = compat_irq_mask_ack;
	if (chip->eoi)
		chip->irq_eoi = compat_irq_eoi;
	if (chip->set_affinity)
		chip->irq_set_affinity = compat_irq_set_affinity;
	if (chip->set_type)
		chip->irq_set_type = compat_irq_set_type;
	if (chip->set_wake)
		chip->irq_set_wake = compat_irq_set_wake;
	if (chip->retrigger)
		chip->irq_retrigger = compat_irq_retrigger;
#endif
}

static inline void mask_ack_irq(struct irq_desc *desc)
327
{
328 329
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
330
	else {
331
		desc->irq_data.chip->irq_mask(&desc->irq_data);
332 333
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
334
	}
335
	irq_state_set_masked(desc);
336 337
}

338
void mask_irq(struct irq_desc *desc)
339
{
340 341
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
342
		irq_state_set_masked(desc);
343 344 345
	}
}

346
void unmask_irq(struct irq_desc *desc)
347
{
348 349
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
350
		irq_state_clr_masked(desc);
351
	}
352 353
}

354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369
/*
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	/* Runs in the task context of the parent irq thread */
	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	irq_compat_set_progress(desc);
	desc->istate |= IRQS_INPROGRESS;
	/* Drop the lock while the thread handler runs */
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	desc->istate &= ~IRQS_INPROGRESS;
	irq_compat_clr_progress(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
397
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
398 399 400 401
		return false;
	return irq_wait_for_poll(desc);
}

402 403 404 405 406 407 408 409 410 411 412 413
/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* Already in progress? Only proceed when the poll code says so */
	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Nothing to do when disabled or no action is installed */
	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask and ack first: the line stays active until serviced */
	mask_ack_irq(desc);

	/* Already in progress? Only proceed when the poll code says so */
	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	handle_irq_event(desc);

	/* Unmask again, unless disabled meanwhile or oneshot */
	if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
		unmask_irq(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* Already in progress? Only proceed when the poll code says so */
	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
		/* Record it as pending and keep it masked */
		irq_compat_set_pending(desc);
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}
	handle_irq_event(desc);
out:
	/* The eoi is issued on every exit path, serviced or not */
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
		      !desc->action))) {
		if (!irq_check_poll(desc)) {
			irq_compat_set_pending(desc);
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		/* Action could have been removed while the lock was dropped */
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!(desc->istate & IRQS_DISABLED) &&
			    (desc->istate & IRQS_MASKED))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !(desc->istate & IRQS_DISABLED));

out_unlock:
	raw_spin_unlock(&desc->lock);
}

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@irq:	the interrupt number
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/* No desc->lock taken: per-CPU irqs need none (see above) */
	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/*
 * __irq_set_handler - set the flow handler for an irq
 * @irq:	irq number
 * @handle:	flow handler; NULL uninstalls and installs handle_bad_irq
 * @is_chained:	nonzero when the handler demultiplexes a chained irq
 * @name:	descriptive name stored in the descriptor
 */
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		/* A real handler makes no sense without a real chip */
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		/* Quiesce the line before taking the handler away */
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_compat_set_disabled(desc);
		desc->istate |= IRQS_DISABLED;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		/* Chained irqs are owned by the demux handler */
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_startup(desc);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

/*
 * irq_set_chip_and_handler_name - set the chip, flow handler and name
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 * @handle:	flow handler to be installed
 * @name:	descriptive name stored in the descriptor
 *
 * Convenience wrapper: the chip must be installed first, since
 * __irq_set_handler() may start up the interrupt through it.
 */
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}

/*
 * irq_modify_status - modify the per-irq settings and resync irq_data
 * @irq:	irq number
 * @clr:	settings bits to clear
 * @set:	settings bits to set
 *
 * Applies @clr/@set to the descriptor settings, then rebuilds the
 * derived IRQD_* state bits in irq_data from the new settings.
 *
 * NOTE(review): IRQD_LEVEL is cleared below but never re-set from the
 * settings in this function — confirm that is intended.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	/* Drop all derived state bits, then rebuild them from settings */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}