#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H

/*
 * Please do not include this file in generic code.  There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

#include <linux/smp.h>
#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/wait.h>

#include <asm/irq.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>

struct seq_file;
struct module;
struct irq_desc;
struct irq_data;
typedef	void (*irq_flow_handler_t)(unsigned int irq,
					    struct irq_desc *desc);
typedef	void (*irq_preflow_handler_t)(struct irq_data *data);

/*
 * IRQ line status.
 *
 * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
 *
 * IRQ_TYPE_NONE		- default, unspecified type
 * IRQ_TYPE_EDGE_RISING		- rising edge triggered
 * IRQ_TYPE_EDGE_FALLING	- falling edge triggered
 * IRQ_TYPE_EDGE_BOTH		- rising and falling edge triggered
 * IRQ_TYPE_LEVEL_HIGH		- high level triggered
 * IRQ_TYPE_LEVEL_LOW		- low level triggered
 * IRQ_TYPE_LEVEL_MASK		- Mask to filter out the level bits
 * IRQ_TYPE_SENSE_MASK		- Mask for all the above bits
 * IRQ_TYPE_DEFAULT		- For use by some PICs to ask irq_set_type
 *				  to set up the HW to a sane default (used
 *                                by irqdomain map() callbacks to synchronize
 *                                the HW state and SW flags for a newly
 *                                allocated descriptor).
 *
 * IRQ_TYPE_PROBE		- Special flag for probing in progress
 *
 * Bits which can be modified via irq_set/clear/modify_status_flags()
 * IRQ_LEVEL			- Interrupt is level type. Will be also
 *				  updated in the code when the above trigger
 *				  bits are modified via irq_set_irq_type()
 * IRQ_PER_CPU			- Mark an interrupt PER_CPU. Will protect
 *				  it from affinity setting
 * IRQ_NOPROBE			- Interrupt cannot be probed by autoprobing
 * IRQ_NOREQUEST		- Interrupt cannot be requested via
 *				  request_irq()
 * IRQ_NOTHREAD			- Interrupt cannot be threaded
 * IRQ_NOAUTOEN			- Interrupt is not automatically enabled in
 *				  request/setup_irq()
 * IRQ_NO_BALANCING		- Interrupt cannot be balanced (affinity set)
 * IRQ_MOVE_PCNTXT		- Interrupt can be migrated from process context
 * IRQ_NESTED_THREAD		- Interrupt nests into another thread
 * IRQ_PER_CPU_DEVID		- Dev_id is a per-cpu variable
 * IRQ_IS_POLLED		- Always polled by another interrupt. Exclude
 *				  it from the spurious interrupt detection
 *				  mechanism and from core side polling.
 */
enum {
	IRQ_TYPE_NONE		= 0x00000000,
	IRQ_TYPE_EDGE_RISING	= 0x00000001,
	IRQ_TYPE_EDGE_FALLING	= 0x00000002,
	IRQ_TYPE_EDGE_BOTH	= (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
	IRQ_TYPE_LEVEL_HIGH	= 0x00000004,
	IRQ_TYPE_LEVEL_LOW	= 0x00000008,
	IRQ_TYPE_LEVEL_MASK	= (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
	IRQ_TYPE_SENSE_MASK	= 0x0000000f,
	IRQ_TYPE_DEFAULT	= IRQ_TYPE_SENSE_MASK,

	IRQ_TYPE_PROBE		= 0x00000010,

	IRQ_LEVEL		= (1 <<  8),
	IRQ_PER_CPU		= (1 <<  9),
	IRQ_NOPROBE		= (1 << 10),
	IRQ_NOREQUEST		= (1 << 11),
	IRQ_NOAUTOEN		= (1 << 12),
	IRQ_NO_BALANCING	= (1 << 13),
	IRQ_MOVE_PCNTXT		= (1 << 14),
	IRQ_NESTED_THREAD	= (1 << 15),
	IRQ_NOTHREAD		= (1 << 16),
	IRQ_PER_CPU_DEVID	= (1 << 17),
	IRQ_IS_POLLED		= (1 << 18),
};
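
/*
 * Example sketch: for a hypothetical "foo" device interrupt, board or
 * driver setup code could program the trigger type and adjust the status
 * flags with helpers declared further down in this header:
 *
 *	irq_set_irq_type(foo_irq, IRQ_TYPE_EDGE_RISING);
 *	irq_set_status_flags(foo_irq, IRQ_NOAUTOEN);
 *
 * foo_irq stands for a valid linux irq number.
 */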

#define IRQF_MODIFY_MASK	\
	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
	 IRQ_IS_POLLED)

#define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)

/*
 * Return value for chip->irq_set_affinity()
 *
 * IRQ_SET_MASK_OK	- OK, core updates irq_data.affinity
 * IRQ_SET_MASK_OK_NOCOPY	- OK, chip did update irq_data.affinity
 */
enum {
	IRQ_SET_MASK_OK = 0,
	IRQ_SET_MASK_OK_NOCOPY,
};
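
/*
 * Example sketch: an irq_set_affinity() callback for a hypothetical "foo"
 * interrupt controller. foo_route_irq() stands in for whatever routes the
 * hardware interrupt to the selected CPU.
 *
 *	static int foo_irq_set_affinity(struct irq_data *d,
 *					const struct cpumask *mask, bool force)
 *	{
 *		foo_route_irq(irqd_to_hwirq(d), cpumask_first(mask));
 *		return IRQ_SET_MASK_OK;
 *	}
 *
 * Returning IRQ_SET_MASK_OK lets the core copy @mask into irq_data.affinity;
 * IRQ_SET_MASK_OK_NOCOPY tells it the chip already updated it.
 */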

struct msi_desc;
struct irq_domain;

/**
 * struct irq_data - per irq and irq chip data passed down to chip functions
 * @mask:		precomputed bitmask for accessing the chip registers
 * @irq:		interrupt number
 * @hwirq:		hardware interrupt number, local to the interrupt domain
 * @node:		node index useful for balancing
 * @state_use_accessors: status information for irq chip functions.
 *			Use accessor functions to deal with it
 * @chip:		low level interrupt hardware access
 * @domain:		Interrupt translation domain; responsible for mapping
 *			between hwirq number and linux irq number.
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 * @msi_desc:		MSI descriptor
 * @affinity:		IRQ affinity on SMP
 *
 * The fields here need to overlay the ones in irq_desc until we
 * cleaned up the direct references and switched everything over to
 * irq_data.
 */
struct irq_data {
	u32			mask;
	unsigned int		irq;
	unsigned long		hwirq;
	unsigned int		node;
	unsigned int		state_use_accessors;
	struct irq_chip		*chip;
	struct irq_domain	*domain;
	void			*handler_data;
	void			*chip_data;
	struct msi_desc		*msi_desc;
	cpumask_var_t		affinity;
};

/*
 * Bit masks for irq_data.state
 *
 * IRQD_TRIGGER_MASK		- Mask for the trigger type bits
 * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
 * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
 * IRQD_PER_CPU			- Interrupt is per cpu
 * IRQD_AFFINITY_SET		- Interrupt affinity was set
 * IRQD_LEVEL			- Interrupt is level triggered
 * IRQD_WAKEUP_STATE		- Interrupt is configured for wakeup
 *				  from suspend
 * IRQD_MOVE_PCNTXT		- Interrupt can be moved in process
 *				  context
 * IRQD_IRQ_DISABLED		- Disabled state of the interrupt
 * IRQD_IRQ_MASKED		- Masked state of the interrupt
 * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
 */
enum {
	IRQD_TRIGGER_MASK		= 0xf,
	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
	IRQD_NO_BALANCING		= (1 << 10),
	IRQD_PER_CPU			= (1 << 11),
	IRQD_AFFINITY_SET		= (1 << 12),
	IRQD_LEVEL			= (1 << 13),
	IRQD_WAKEUP_STATE		= (1 << 14),
	IRQD_MOVE_PCNTXT		= (1 << 15),
	IRQD_IRQ_DISABLED		= (1 << 16),
	IRQD_IRQ_MASKED			= (1 << 17),
	IRQD_IRQ_INPROGRESS		= (1 << 18),
};

static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
}

static inline bool irqd_is_per_cpu(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_PER_CPU;
}

static inline bool irqd_can_balance(struct irq_data *d)
{
	return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

static inline bool irqd_affinity_was_set(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_AFFINITY_SET;
}

static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
	d->state_use_accessors |= IRQD_AFFINITY_SET;
}

static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_TRIGGER_MASK;
}

/*
 * Must only be called inside irq_chip.irq_set_type() functions.
 */
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
	d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
	d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
}

static inline bool irqd_is_level_type(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_LEVEL;
}

static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_WAKEUP_STATE;
}

static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_MOVE_PCNTXT;
}

static inline bool irqd_irq_disabled(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_DISABLED;
}

static inline bool irqd_irq_masked(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_MASKED;
}

static inline bool irqd_irq_inprogress(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
}

/*
 * Functions for chained handlers which can be enabled/disabled by the
 * standard disable_irq/enable_irq calls. Must be called with
 * irq_desc->lock held.
 */
static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
{
	d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
}

static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
{
	d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
}

static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
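
/*
 * Example sketch: irq_chip callbacks usually work on irq_data through the
 * accessors above. For a hypothetical memory-mapped "foo" controller whose
 * chip_data points to a driver structure carrying the register base:
 *
 *	static void foo_irq_mask(struct irq_data *d)
 *	{
 *		struct foo_intc *fc = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(irqd_to_hwirq(d)), fc->base + FOO_MASK_SET);
 *	}
 *
 * struct foo_intc, fc->base and FOO_MASK_SET are placeholders.
 */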

/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @irq_startup:	start up the interrupt (defaults to ->enable if NULL)
 * @irq_shutdown:	shut down the interrupt (defaults to ->disable if NULL)
 * @irq_enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @irq_disable:	disable the interrupt
 * @irq_ack:		start of a new interrupt
 * @irq_mask:		mask an interrupt source
 * @irq_mask_ack:	ack and mask an interrupt source
 * @irq_unmask:		unmask an interrupt source
 * @irq_eoi:		end of interrupt
 * @irq_set_affinity:	set the CPU affinity on SMP machines
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
 * @irq_cpu_online:	configure an interrupt source for a secondary CPU
 * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
 * @irq_suspend:	function called from core code on suspend once per chip
 * @irq_resume:		function called from core code on resume once per chip
 * @irq_pm_shutdown:	function called from core code on shutdown once per chip
 * @irq_calc_mask:	Optional function to set irq_data.mask for special cases
 * @irq_print_chip:	optional to print special chip info in show_interrupts
 * @irq_request_resources:	optional to request resources before calling
 *				any other callback related to this irq
 * @irq_release_resources:	optional to release resources acquired with
 *				irq_request_resources
 * @flags:		chip specific flags
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*irq_startup)(struct irq_data *data);
	void		(*irq_shutdown)(struct irq_data *data);
	void		(*irq_enable)(struct irq_data *data);
	void		(*irq_disable)(struct irq_data *data);

	void		(*irq_ack)(struct irq_data *data);
	void		(*irq_mask)(struct irq_data *data);
	void		(*irq_mask_ack)(struct irq_data *data);
	void		(*irq_unmask)(struct irq_data *data);
	void		(*irq_eoi)(struct irq_data *data);

	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
	int		(*irq_retrigger)(struct irq_data *data);
	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);

	void		(*irq_bus_lock)(struct irq_data *data);
	void		(*irq_bus_sync_unlock)(struct irq_data *data);

	void		(*irq_cpu_online)(struct irq_data *data);
	void		(*irq_cpu_offline)(struct irq_data *data);

	void		(*irq_suspend)(struct irq_data *data);
	void		(*irq_resume)(struct irq_data *data);
	void		(*irq_pm_shutdown)(struct irq_data *data);

	void		(*irq_calc_mask)(struct irq_data *data);

	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);
	int		(*irq_request_resources)(struct irq_data *data);
	void		(*irq_release_resources)(struct irq_data *data);

	unsigned long	flags;
};
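
/*
 * Example sketch: a minimal driver irq_chip for a hypothetical "foo"
 * controller, paired with a flow handler declared further down in this file:
 *
 *	static struct irq_chip foo_irq_chip = {
 *		.name		= "foo",
 *		.irq_ack	= foo_irq_ack,
 *		.irq_mask	= foo_irq_mask,
 *		.irq_unmask	= foo_irq_unmask,
 *		.irq_set_type	= foo_irq_set_type,
 *	};
 *
 *	irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
 *
 * The foo_* callbacks are placeholders.
 */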

/*
 * irq_chip specific flags
 *
 * IRQCHIP_SET_TYPE_MASKED:	Mask before calling chip.irq_set_type()
 * IRQCHIP_EOI_IF_HANDLED:	Only issue irq_eoi() when irq was handled
 * IRQCHIP_MASK_ON_SUSPEND:	Mask non wake irqs in the suspend path
 * IRQCHIP_ONOFFLINE_ENABLED:	Only call irq_on/off_line callbacks
 *				when irq enabled
 * IRQCHIP_SKIP_SET_WAKE:	Skip chip.irq_set_wake(), for this irq chip
 * IRQCHIP_ONESHOT_SAFE:	One shot does not require mask/unmask
 */
enum {
	IRQCHIP_SET_TYPE_MASKED		= (1 <<  0),
	IRQCHIP_EOI_IF_HANDLED		= (1 <<  1),
	IRQCHIP_MASK_ON_SUSPEND		= (1 <<  2),
	IRQCHIP_ONOFFLINE_ENABLED	= (1 <<  3),
	IRQCHIP_SKIP_SET_WAKE		= (1 <<  4),
	IRQCHIP_ONESHOT_SAFE		= (1 <<  5),
};

/* This include will go away once we have isolated irq_desc usage to core code */
#include <linux/irqdesc.h>

/*
 * Pick up the arch-dependent methods:
 */
#include <asm/hw_irq.h>

#ifndef NR_IRQS_LEGACY
# define NR_IRQS_LEGACY 0
#endif

#ifndef ARCH_IRQ_INIT_FLAGS
# define ARCH_IRQ_INIT_FLAGS	0
#endif

#define IRQ_DEFAULT_INIT_FLAGS	ARCH_IRQ_INIT_FLAGS

struct irqaction;
extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);

extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int __irq_set_affinity_locked(struct irq_data *data,  const struct cpumask *cpumask);

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
void irq_move_masked_irq(struct irq_data *data);
#else
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
#endif

extern int no_irq_affinity;

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq);
#else
static inline int irq_set_parent(int irq, int parent_irq)
{
	return 0;
}
#endif

/*
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->handle_irq()
 */
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);

/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
			   irqreturn_t action_ret);

/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);

/* Checks whether the interrupt can be requested by request_irq(): */
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);

static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
					    irq_flow_handler_t handle)
{
	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}

extern int irq_set_percpu_devid(unsigned int irq);

extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name);

static inline void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 0, NULL);
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
 */
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 1, NULL);
}
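
/*
 * Example sketch: a GPIO-style demultiplexing driver typically installs a
 * chained handler on its parent interrupt and dispatches the child
 * interrupts itself:
 *
 *	static void foo_gpio_demux(unsigned int irq, struct irq_desc *desc)
 *	{
 *		struct foo_gpio *fg = irq_get_handler_data(irq);
 *		unsigned long pending = readl(fg->base + FOO_PENDING);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(fg->irq_base + bit);
 *	}
 *
 *	irq_set_handler_data(parent_irq, fg);
 *	irq_set_chained_handler(parent_irq, foo_gpio_demux);
 *
 * The foo_* names and the FOO_PENDING register are placeholders;
 * generic_handle_irq() comes from linux/irqdesc.h.
 */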

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);

static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
	irq_modify_status(irq, 0, set);
}

static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
	irq_modify_status(irq, clr, 0);
}

static inline void irq_set_noprobe(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOPROBE);
}

static inline void irq_set_probe(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOPROBE, 0);
}

static inline void irq_set_nothread(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOTHREAD);
}

static inline void irq_set_thread(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOTHREAD, 0);
}

static inline void irq_set_nested_thread(unsigned int irq, bool nest)
{
	if (nest)
		irq_set_status_flags(irq, IRQ_NESTED_THREAD);
	else
		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}

static inline void irq_set_percpu_devid_flags(unsigned int irq)
{
	irq_set_status_flags(irq,
			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
}

/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern unsigned int __create_irqs(unsigned int from, unsigned int count,
				  int node);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);
extern void destroy_irqs(unsigned int irq, unsigned int count);

/*
 * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and
 * irq_free_desc instead.
 */
extern void dynamic_irq_cleanup(unsigned int irq);
static inline void dynamic_irq_init(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

/* Set/get chip/data for an IRQ: */
extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
				struct msi_desc *entry);
extern struct irq_data *irq_get_irq_data(unsigned int irq);

static inline struct irq_chip *irq_get_chip(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip : NULL;
}

static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
{
	return d->chip;
}

static inline void *irq_get_chip_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip_data : NULL;
}

static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
{
	return d->chip_data;
}

static inline void *irq_get_handler_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->handler_data : NULL;
}

static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
	return d->handler_data;
}

static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->msi_desc : NULL;
}

static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
{
	return d->msi_desc;
}

static inline u32 irq_get_trigger_type(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? irqd_get_trigger_type(d) : 0;
}

int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		struct module *owner);

/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node)	\
	__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE)

#define irq_alloc_desc(node)			\
	irq_alloc_descs(-1, 0, 1, node)

#define irq_alloc_desc_at(at, node)		\
	irq_alloc_descs(at, at, 1, node)

#define irq_alloc_desc_from(from, node)		\
	irq_alloc_descs(-1, from, 1, node)

#define irq_alloc_descs_from(from, cnt, node)	\
	irq_alloc_descs(-1, from, cnt, node)

void irq_free_descs(unsigned int irq, unsigned int cnt);
int irq_reserve_irqs(unsigned int from, unsigned int cnt);

static inline void irq_free_desc(unsigned int irq)
{
	irq_free_descs(irq, 1);
}
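
/*
 * Example sketch: allocating a block of interrupt descriptors for a
 * hypothetical driver and releasing it again on teardown:
 *
 *	int base = irq_alloc_descs(-1, 0, 16, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 16);
 *
 * Passing irq < 0 lets the core pick the first free range at or above
 * "from"; a specific linux irq number can be requested with
 * irq_alloc_desc_at().
 */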

static inline int irq_reserve_irq(unsigned int irq)
{
	return irq_reserve_irqs(irq, 1);
}

#ifndef irq_reg_writel
# define irq_reg_writel(val, addr)	writel(val, addr)
#endif
#ifndef irq_reg_readl
# define irq_reg_readl(addr)		readl(addr)
#endif

/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
 * @enable:	Enable register offset to reg_base
 * @disable:	Disable register offset to reg_base
 * @mask:	Mask register offset to reg_base
 * @ack:	Ack register offset to reg_base
 * @eoi:	Eoi register offset to reg_base
 * @type:	Type configuration register offset to reg_base
 * @polarity:	Polarity configuration register offset to reg_base
 */
struct irq_chip_regs {
	unsigned long		enable;
	unsigned long		disable;
	unsigned long		mask;
	unsigned long		ack;
	unsigned long		eoi;
	unsigned long		type;
	unsigned long		polarity;
};

/**
 * struct irq_chip_type - Generic interrupt chip instance for a flow type
 * @chip:		The real interrupt chip which provides the callbacks
 * @regs:		Register offsets for this chip
 * @handler:		Flow handler associated with this chip
 * @type:		Chip can handle these flow types
 * @mask_cache_priv:	Cached mask register private to the chip type
 * @mask_cache:		Pointer to cached mask register
 *
 * An irq_chip_generic can have several instances of irq_chip_type when
 * it requires different functions and register offsets for different
 * flow types.
 */
struct irq_chip_type {
	struct irq_chip		chip;
	struct irq_chip_regs	regs;
	irq_flow_handler_t	handler;
	u32			type;
	u32			mask_cache_priv;
	u32			*mask_cache;
};

/**
 * struct irq_chip_generic - Generic irq chip data structure
 * @lock:		Lock to protect register and cache data access
 * @reg_base:		Register base address (virtual)
 * @irq_base:		Interrupt base nr for this chip
 * @irq_cnt:		Number of interrupts handled by this chip
 * @mask_cache:		Cached mask register shared between all chip types
 * @type_cache:		Cached type register
 * @polarity_cache:	Cached polarity register
 * @wake_enabled:	Interrupt can wake up from suspend
 * @wake_active:	Interrupt is marked as a wakeup from suspend source
 * @num_ct:		Number of available irq_chip_type instances (usually 1)
 * @private:		Private data for non generic chip callbacks
 * @installed:		bitfield to denote installed interrupts
 * @unused:		bitfield to denote unused interrupts
 * @domain:		irq domain pointer
 * @list:		List head for keeping track of instances
 * @chip_types:		Array of interrupt irq_chip_types
 *
 * Note that irq_chip_generic can have multiple irq_chip_type
 * implementations which can be associated with a particular irq line of
 * an irq_chip_generic instance. That allows sharing and protecting
 * state in an irq_chip_generic instance when we need to implement
 * different flow mechanisms (level/edge) for it.
 */
struct irq_chip_generic {
	raw_spinlock_t		lock;
	void __iomem		*reg_base;
	unsigned int		irq_base;
	unsigned int		irq_cnt;
	u32			mask_cache;
	u32			type_cache;
	u32			polarity_cache;
	u32			wake_enabled;
	u32			wake_active;
	unsigned int		num_ct;
	void			*private;
	unsigned long		installed;
	unsigned long		unused;
	struct irq_domain	*domain;
	struct list_head	list;
	struct irq_chip_type	chip_types[0];
};

/**
 * enum irq_gc_flags - Initialization flags for generic irq chips
 * @IRQ_GC_INIT_MASK_CACHE:	Initialize the mask_cache by reading mask reg
 * @IRQ_GC_INIT_NESTED_LOCK:	Set the lock class of the irqs to nested for
 *				irq chips which need to call irq_set_wake() on
 *				the parent irq. Usually GPIO implementations
 * @IRQ_GC_MASK_CACHE_PER_TYPE:	Mask cache is chip type private
 * @IRQ_GC_NO_MASK:		Do not calculate irq_data->mask
 */
enum irq_gc_flags {
	IRQ_GC_INIT_MASK_CACHE		= 1 << 0,
	IRQ_GC_INIT_NESTED_LOCK		= 1 << 1,
	IRQ_GC_MASK_CACHE_PER_TYPE	= 1 << 2,
	IRQ_GC_NO_MASK			= 1 << 3,
};

/*
 * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains
 * @irqs_per_chip:	Number of interrupts per chip
 * @num_chips:		Number of chips
 * @irq_flags_to_set:	IRQ* flags to set on irq setup
 * @irq_flags_to_clear:	IRQ* flags to clear on irq setup
 * @gc_flags:		Generic chip specific setup flags
 * @gc:			Array of pointers to generic interrupt chips
 */
struct irq_domain_chip_generic {
	unsigned int		irqs_per_chip;
	unsigned int		num_chips;
	unsigned int		irq_flags_to_clear;
	unsigned int		irq_flags_to_set;
	enum irq_gc_flags	gc_flags;
	struct irq_chip_generic	*gc[0];
};

/* Generic chip callback functions */
void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
void irq_gc_mask_set_bit(struct irq_data *d);
void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);

/* Setup functions for irq_chip_generic */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler);
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set);
int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set);
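
/*
 * Example sketch: setting up a single generic chip for a hypothetical
 * memory-mapped controller with 32 interrupt lines starting at irq_base.
 * The FOO_MASK register offset is a placeholder.
 *
 *	struct irq_chip_generic *gc;
 *	struct irq_chip_type *ct;
 *
 *	gc = irq_alloc_generic_chip("foo", 1, irq_base, reg_base,
 *				    handle_level_irq);
 *	ct = gc->chip_types;
 *	ct->chip.irq_mask = irq_gc_mask_set_bit;
 *	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 *	ct->regs.mask = FOO_MASK;
 *	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
 *			       IRQ_NOREQUEST, 0);
 */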

struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
				   int num_ct, const char *name,
				   irq_flow_handler_t handler,
				   unsigned int clr, unsigned int set,
				   enum irq_gc_flags flags);
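
/*
 * Example sketch: the irq domain variant creates and registers the generic
 * chips for a whole domain in one call; per-chip registers are filled in
 * afterwards. The foo/FOO_MASK names are placeholders.
 *
 *	int ret;
 *	struct irq_chip_generic *gc;
 *
 *	ret = irq_alloc_domain_generic_chips(domain, 32, 1, "foo",
 *					     handle_level_irq, 0, 0,
 *					     IRQ_GC_INIT_MASK_CACHE);
 *	if (ret)
 *		return ret;
 *
 *	gc = irq_get_domain_generic_chip(domain, 0);
 *	gc->reg_base = reg_base;
 *	gc->chip_types[0].regs.mask = FOO_MASK;
 *	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
 *	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
 */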


static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
	return container_of(d->chip, struct irq_chip_type, chip);
}

#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)

#ifdef CONFIG_SMP
static inline void irq_gc_lock(struct irq_chip_generic *gc)
{
	raw_spin_lock(&gc->lock);
}

static inline void irq_gc_unlock(struct irq_chip_generic *gc)
{
	raw_spin_unlock(&gc->lock);
}
#else
static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif

#endif /* _LINUX_IRQ_H */