/*
 * include/linux/irq.h - IRQ line status bits, irq_chip and irq_desc
 */
#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H
L
Linus Torvalds 已提交
3 4 5 6 7 8 9 10 11

/*
 * Please do not include this file in generic code.  There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

12
#include <linux/smp.h>
L
Linus Torvalds 已提交
13

14
#ifndef CONFIG_S390
L
Linus Torvalds 已提交
15 16 17 18 19

#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
R
Ralf Baechle 已提交
20
#include <linux/gfp.h>
21
#include <linux/irqreturn.h>
T
Thomas Gleixner 已提交
22
#include <linux/irqnr.h>
D
David Howells 已提交
23
#include <linux/errno.h>
R
Ralf Baechle 已提交
24
#include <linux/topology.h>
25
#include <linux/wait.h>
L
Linus Torvalds 已提交
26 27 28

#include <asm/irq.h>
#include <asm/ptrace.h>
29
#include <asm/irq_regs.h>
L
Linus Torvalds 已提交
30

31
struct irq_desc;

/* Flow handler: invoked once per IRQ event on the given descriptor. */
typedef	void (*irq_flow_handler_t)(unsigned int irq,
					    struct irq_desc *desc);
34 35


L
Linus Torvalds 已提交
36 37
/*
 * IRQ line status.
 *
 * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
 *
 * IRQ types
 */
#define IRQ_TYPE_NONE		0x00000000	/* Default, unspecified type */
#define IRQ_TYPE_EDGE_RISING	0x00000001	/* Edge rising type */
#define IRQ_TYPE_EDGE_FALLING	0x00000002	/* Edge falling type */
#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
#define IRQ_TYPE_LEVEL_HIGH	0x00000004	/* Level high type */
#define IRQ_TYPE_LEVEL_LOW	0x00000008	/* Level low type */
#define IRQ_TYPE_SENSE_MASK	0x0000000f	/* Mask of the above */
#define IRQ_TYPE_PROBE		0x00000010	/* Probing in progress */

/* Internal flags */
#define IRQ_INPROGRESS		0x00000100	/* IRQ handler active - do not enter! */
#define IRQ_DISABLED		0x00000200	/* IRQ disabled - do not enter! */
#define IRQ_PENDING		0x00000400	/* IRQ pending - replay on enable */
#define IRQ_REPLAY		0x00000800	/* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT		0x00001000	/* IRQ is being autodetected */
#define IRQ_WAITING		0x00002000	/* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL		0x00004000	/* IRQ level triggered */
#define IRQ_MASKED		0x00008000	/* IRQ masked - shouldn't be seen again */
#define IRQ_PER_CPU		0x00010000	/* IRQ is per CPU */
#define IRQ_NOPROBE		0x00020000	/* IRQ is not valid for probing */
#define IRQ_NOREQUEST		0x00040000	/* IRQ cannot be requested */
#define IRQ_NOAUTOEN		0x00080000	/* IRQ will not be enabled on request irq */
#define IRQ_WAKEUP		0x00100000	/* IRQ triggers system wakeup */
#define IRQ_MOVE_PENDING	0x00200000	/* need to re-target IRQ destination */
#define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
#define IRQ_SPURIOUS_DISABLED	0x00800000	/* IRQ was disabled by the spurious trap */
#define IRQ_MOVE_PCNTXT		0x01000000	/* IRQ migration from process context */
#define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace*/
#define IRQ_SUSPENDED		0x04000000	/* IRQ has gone through suspend sequence */

#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
# define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
#else
# define CHECK_IRQ_PER_CPU(var) 0
# define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
#endif
L
Linus Torvalds 已提交
80

T
Thomas Gleixner 已提交
81
/* Opaque types referenced only by pointer below. */
struct proc_dir_entry;
struct msi_desc;
T
Thomas Gleixner 已提交
83

84
/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @startup:		start up the interrupt (defaults to ->enable if NULL)
 * @shutdown:		shut down the interrupt (defaults to ->disable if NULL)
 * @enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @disable:		disable the interrupt (defaults to chip->mask if NULL)
 * @ack:		start of a new interrupt
 * @mask:		mask an interrupt source
 * @mask_ack:		ack and mask an interrupt source
 * @unmask:		unmask an interrupt source
 * @eoi:		end of interrupt - chip level
 * @end:		end of interrupt - flow level
 * @set_affinity:	set the CPU affinity on SMP machines
 * @retrigger:		resend an IRQ to the CPU
 * @set_type:		set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @set_wake:		enable/disable power-management wake-on of an IRQ
 *
 * @release:		release function solely used by UML
 * @typename:		obsoleted by name, kept as migration helper
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*startup)(unsigned int irq);
	void		(*shutdown)(unsigned int irq);
	void		(*enable)(unsigned int irq);
	void		(*disable)(unsigned int irq);

	void		(*ack)(unsigned int irq);
	void		(*mask)(unsigned int irq);
	void		(*mask_ack)(unsigned int irq);
	void		(*unmask)(unsigned int irq);
	void		(*eoi)(unsigned int irq);

	void		(*end)(unsigned int irq);
	void		(*set_affinity)(unsigned int irq,
					const struct cpumask *dest);
	int		(*retrigger)(unsigned int irq);
	int		(*set_type)(unsigned int irq, unsigned int flow_type);
	int		(*set_wake)(unsigned int irq, unsigned int on);

	/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void		(*release)(unsigned int irq, void *dev_id);
#endif
	/*
	 * For compatibility, ->typename is copied into ->name.
	 * Will disappear.
	 */
	const char	*typename;
};

137 138
struct timer_rand_state;
struct irq_2_iommu;
139 140
/**
 * struct irq_desc - interrupt descriptor
R
Randy Dunlap 已提交
141
 * @irq:		interrupt number for this descriptor
142 143 144
 * @timer_rand_state:	pointer to timer rand state struct
 * @kstat_irqs:		irq stats per cpu
 * @irq_2_iommu:	iommu with this irq
T
Thomas Gleixner 已提交
145 146
 * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
 * @chip:		low level interrupt hardware access
R
Randy Dunlap 已提交
147
 * @msi_desc:		MSI descriptor
T
Thomas Gleixner 已提交
148 149 150
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
151 152 153
 * @action:		the irq action chain
 * @status:		status information
 * @depth:		disable-depth, for nested irq_disable() calls
154
 * @wake_depth:		enable depth, for multiple set_irq_wake() callers
155
 * @irq_count:		stats field to detect stalled irqs
R
Randy Dunlap 已提交
156
 * @last_unhandled:	aging timer for unhandled count
157
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
158 159
 * @lock:		locking for SMP
 * @affinity:		IRQ affinity on SMP
T
Thomas Gleixner 已提交
160
 * @cpu:		cpu index useful for balancing
161
 * @pending_mask:	pending rebalanced interrupts
162 163
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
164
 * @dir:		/proc/irq/ procfs entry
165
 * @name:		flow handler name for /proc/interrupts output
L
Linus Torvalds 已提交
166
 */
167
struct irq_desc {
168
	unsigned int		irq;
169 170
	struct timer_rand_state *timer_rand_state;
	unsigned int            *kstat_irqs;
171
#ifdef CONFIG_INTR_REMAP
172 173
	struct irq_2_iommu      *irq_2_iommu;
#endif
174
	irq_flow_handler_t	handle_irq;
T
Thomas Gleixner 已提交
175
	struct irq_chip		*chip;
176
	struct msi_desc		*msi_desc;
T
Thomas Gleixner 已提交
177
	void			*handler_data;
178 179 180
	void			*chip_data;
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status;		/* IRQ status */
T
Thomas Gleixner 已提交
181

182
	unsigned int		depth;		/* nested irq disables */
183
	unsigned int		wake_depth;	/* nested wake enables */
184
	unsigned int		irq_count;	/* For detecting broken IRQs */
185
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
186
	unsigned int		irqs_unhandled;
187
	spinlock_t		lock;
188
#ifdef CONFIG_SMP
189
	cpumask_var_t		affinity;
T
Thomas Gleixner 已提交
190
	unsigned int		cpu;
191
#ifdef CONFIG_GENERIC_PENDING_IRQ
192 193
	cpumask_var_t		pending_mask;
#endif
194
#endif
195 196
	atomic_t		threads_active;
	wait_queue_head_t       wait_for_threads;
197
#ifdef CONFIG_PROC_FS
198
	struct proc_dir_entry	*dir;
199
#endif
200
	const char		*name;
201
} ____cacheline_internodealigned_in_smp;
L
Linus Torvalds 已提交
202

203 204 205
extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
					struct irq_desc *desc, int cpu);
extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
Y
Yinghai Lu 已提交
206

207
#ifndef CONFIG_SPARSE_IRQ
208
extern struct irq_desc irq_desc[NR_IRQS];
209
#else /* CONFIG_SPARSE_IRQ */
210
extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
211
#endif /* CONFIG_SPARSE_IRQ */
212

213
extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
214

215 216 217 218 219 220 221 222
/*
 * When descriptors can migrate between nodes, re-lookup the (possibly
 * moved) descriptor; otherwise the caller's pointer is still valid.
 */
static inline struct irq_desc *
irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
{
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
	return irq_to_desc(irq);
#else
	return desc;
#endif
}

225 226 227
/*
 * Migration helpers for obsolete names, they will go away:
 */
#define hw_interrupt_type	irq_chip
#define no_irq_type		no_irq_chip
typedef struct irq_desc		irq_desc_t;

/*
 * Pick up the arch-dependent methods:
 */
#include <asm/hw_irq.h>
L
Linus Torvalds 已提交
236

237
extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);

#ifdef CONFIG_GENERIC_HARDIRQS

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_PENDING_IRQ

void move_native_irq(int irq);
void move_masked_irq(int irq);

#else /* CONFIG_GENERIC_PENDING_IRQ */

/* No pending-IRQ migration: all move helpers collapse to no-ops. */
static inline void move_irq(int irq)
{
}

static inline void move_native_irq(int irq)
{
}

static inline void move_masked_irq(int irq)
{
}

#endif /* CONFIG_GENERIC_PENDING_IRQ */

#else /* CONFIG_SMP */

#define move_native_irq(x)
#define move_masked_irq(x)

#endif /* CONFIG_SMP */

extern int no_irq_affinity;

274 275
static inline int irq_balancing_disabled(unsigned int irq)
{
276 277 278 279
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status & IRQ_NO_BALANCING_MASK;
280 281
}

T
Thomas Gleixner 已提交
282
/* Handle irq action chains: */
283
extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
T
Thomas Gleixner 已提交
284 285 286 287 288

/*
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->chip->handle_irq()
 */
289 290 291 292 293 294
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
T
Thomas Gleixner 已提交
295

296
/*
T
Thomas Gleixner 已提交
297
 * Monolithic do_IRQ implementation.
298
 */
299
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
300
extern unsigned int __do_IRQ(unsigned int irq);
301
#endif
302

I
Ingo Molnar 已提交
303 304 305 306 307 308
/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller then we call the ->handle_irq() handler,
 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
 */
309
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
I
Ingo Molnar 已提交
310
{
311
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
312
	desc->handle_irq(irq, desc);
313
#else
I
Ingo Molnar 已提交
314
	if (likely(desc->handle_irq))
315
		desc->handle_irq(irq, desc);
I
Ingo Molnar 已提交
316
	else
317
		__do_IRQ(irq);
318
#endif
I
Ingo Molnar 已提交
319 320
}

321 322 323 324 325
/* Look up the descriptor for @irq and run the generic flow handling on it. */
static inline void generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	generic_handle_irq_desc(irq, desc);
}

T
Thomas Gleixner 已提交
326
/* Handling of unhandled and spurious interrupts: */
327
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
328
			   irqreturn_t action_ret);
L
Linus Torvalds 已提交
329

330 331 332
/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq);

T
Thomas Gleixner 已提交
333 334 335 336 337 338
/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);

/* Checks whether the interrupt can be requested by request_irq(): */
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

339
/* Dummy irq-chip implementations: */
T
Thomas Gleixner 已提交
340
extern struct irq_chip no_irq_chip;
341
extern struct irq_chip dummy_irq_chip;
T
Thomas Gleixner 已提交
342

343 344 345
extern void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 irq_flow_handler_t handle);
T
Thomas Gleixner 已提交
346
extern void
347 348 349
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);

T
Thomas Gleixner 已提交
350
extern void
351 352
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name);
L
Linus Torvalds 已提交
353

354 355 356 357
/* caller has locked the irq_desc and both params are valid */
static inline void __set_irq_handler_unlocked(int irq,
					      irq_flow_handler_t handler)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->handle_irq = handler;
}

T
Thomas Gleixner 已提交
364 365 366 367
/*
 * Set a highlevel flow handler for a given IRQ:
 */
static inline void
368
set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
T
Thomas Gleixner 已提交
369
{
370
	__set_irq_handler(irq, handle, 0, NULL);
T
Thomas Gleixner 已提交
371 372 373 374 375 376 377 378 379
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST and IRQ_NOPROBE)
 */
static inline void
set_irq_chained_handler(unsigned int irq,
380
			irq_flow_handler_t handle)
T
Thomas Gleixner 已提交
381
{
382
	__set_irq_handler(irq, handle, 1, NULL);
T
Thomas Gleixner 已提交
383 384
}

R
Ralf Baechle 已提交
385 386 387
extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);

/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);

393 394 395
/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
396
	struct irq_desc *desc = irq_to_desc(irq);
397 398 399
	return desc->action != NULL;
}

400 401 402
/* Dynamic irq helper functions */
extern void dynamic_irq_init(unsigned int irq);
extern void dynamic_irq_cleanup(unsigned int irq);

/* Set/get chip/data for an IRQ: */
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
extern int set_irq_data(unsigned int irq, void *data);
extern int set_irq_chip_data(unsigned int irq, void *data);
extern int set_irq_type(unsigned int irq, unsigned int type);
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);

/* Accessors that take an irq number and look up the descriptor: */
#define get_irq_chip(irq)	(irq_to_desc(irq)->chip)
#define get_irq_chip_data(irq)	(irq_to_desc(irq)->chip_data)
#define get_irq_data(irq)	(irq_to_desc(irq)->handler_data)
#define get_irq_msi(irq)	(irq_to_desc(irq)->msi_desc)

/* Accessors for callers that already hold the descriptor: */
#define get_irq_desc_chip(desc)		((desc)->chip)
#define get_irq_desc_chip_data(desc)	((desc)->chip_data)
#define get_irq_desc_data(desc)		((desc)->handler_data)
#define get_irq_desc_msi(desc)		((desc)->msi_desc)

T
Thomas Gleixner 已提交
421
#endif /* CONFIG_GENERIC_HARDIRQS */
L
Linus Torvalds 已提交
422

423
#endif /* !CONFIG_S390 */
L
Linus Torvalds 已提交
424

425 426 427 428
#ifdef CONFIG_SMP
/**
 * init_alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc:	pointer to irq_desc struct
429
 * @cpu:	cpu which will be handling the cpumasks
430 431 432 433 434 435
 * @boot:	true if need bootmem
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 * Side effect: affinity has all bits set, pending_mask has all bits clear.
 */
436
static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
437 438
								bool boot)
{
439 440
	int node;

441 442 443 444 445 446 447 448 449 450 451
	if (boot) {
		alloc_bootmem_cpumask_var(&desc->affinity);
		cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
		alloc_bootmem_cpumask_var(&desc->pending_mask);
		cpumask_clear(desc->pending_mask);
#endif
		return true;
	}

452 453
	node = cpu_to_node(cpu);

454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489
	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
		return false;
	cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
		free_cpumask_var(desc->affinity);
		return false;
	}
	cpumask_clear(desc->pending_mask);
#endif
	return true;
}

/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc:	pointer to old irq_desc struct
 * @new_desc:	pointer to new irq_desc struct
 *
 * Ensures affinity and pending_masks are copied to new irq_desc.
 * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASKS_OFFSTACK
	/* Off-stack masks are separate allocations: copy their contents. */
	cpumask_copy(new_desc->affinity, old_desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}

490 491 492 493 494 495 496 497 498 499
/**
 * free_desc_masks - free cpumasks allocated by init_alloc_desc_masks()
 * @old_desc:	irq_desc whose affinity/pending masks are released
 * @new_desc:	not used by this function; kept for symmetry with
 *		init_copy_desc_masks()
 */
static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
	free_cpumask_var(old_desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(old_desc->pending_mask);
#endif
}

500 501
#else /* !CONFIG_SMP */

502
/* UP (!CONFIG_SMP): no cpumasks in irq_desc, all helpers are no-ops. */
static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
								bool boot)
{
	return true;
}

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}

static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
}
517 518
#endif	/* CONFIG_SMP */

519
#endif /* _LINUX_IRQ_H */