/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is the "no color" value, used for work items
	 * which don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,

	/*
	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
	 * off.  This makes cwqs aligned to 256 bytes and allows 15
	 * workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	/* data contains off-queue information when !WORK_STRUCT_CWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_FLAG_BITS,
	WORK_OFFQ_FLAG_BITS	= 0,
	WORK_OFFQ_CPU_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(delayed_work_timer_fn,	\
				0, (unsigned long)&(n)),	\
	}

#define __DEFERRED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_DEFERRED_INITIALIZER(delayed_work_timer_fn, \
				0, (unsigned long)&(n)),	\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

#define DECLARE_DEFERRED_WORK(n, f)				\
	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
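
/*
 * Example (an illustrative sketch, not part of this header; the names
 * my_work and my_work_fn are hypothetical):  declare a work item
 * statically and queue it on the system workqueue.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("deferred work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);
 */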

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)				\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
		(_work)->timer.function = delayed_work_timer_fn;\
		(_work)->timer.data = (unsigned long)(_work);	\
	} while (0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
	do {							\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
		(_work)->timer.function = delayed_work_timer_fn;\
		(_work)->timer.data = (unsigned long)(_work);	\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
		(_work)->timer.function = delayed_work_timer_fn;\
		(_work)->timer.data = (unsigned long)(_work);	\
	} while (0)
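
/*
 * Example (an illustrative sketch; struct my_dev, my_dev_work_fn and
 * my_dev_timeout_fn are hypothetical):  initialize work items embedded
 * in a dynamically allocated object and recover the object in the
 * handlers with container_of()/to_delayed_work().
 *
 *	struct my_dev {
 *		struct work_struct	work;
 *		struct delayed_work	dwork;
 *	};
 *
 *	static void my_dev_work_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		...
 *	}
 *
 *	static void my_dev_timeout_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev =
 *			container_of(to_delayed_work(work), struct my_dev, dwork);
 *		...
 *	}
 *
 *	INIT_WORK(&dev->work, my_dev_work_fn);
 *	INIT_DELAYED_WORK(&dev->dwork, my_dev_timeout_fn);
 */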

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
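
/*
 * Example (sketch; dev is a hypothetical object embedding work items):
 *
 *	if (work_pending(&dev->work))
 *		return -EBUSY;
 *	if (delayed_work_pending(&dev->dwork))
 *		return -EBUSY;
 */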

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
 * it's freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_nrt_freezable_wq;
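
/*
 * Example (sketch; the work items are hypothetical):  pick the system
 * workqueue whose description above matches the work item.
 *
 *	queue_work(system_wq, &short_lived_work);
 *	queue_work(system_long_wq, &long_running_work);
 *	queue_work(system_unbound_wq, &cpu_agnostic_work);
 */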

extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)	\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(fmt))				\
		__lock_name = (fmt);				\
	else							\
		__lock_name = #fmt;				\
								\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      &__key, __lock_name, ##args);	\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)	\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      NULL, NULL, ##args)
#endif
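
/*
 * Example (sketch; "my_driver_wq" and my_work are hypothetical):  a
 * driver-private workqueue usable for memory reclaim, with the default
 * max_active.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_driver_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &my_work);
 */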

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)		\
	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)

#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
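
/*
 * Example (sketch; wq, work and dwork are hypothetical):  queue work,
 * wait for everything queued so far to finish, then tear down.
 *
 *	queue_work(wq, &work);
 *	queue_delayed_work(wq, &dwork, HZ);
 *	...
 *	flush_workqueue(wq);
 *	destroy_workqueue(wq);
 */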

extern bool schedule_work_on(int cpu, struct work_struct *work);
extern bool schedule_work(struct work_struct *work);
extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
				     unsigned long delay);
extern bool schedule_delayed_work(struct delayed_work *work,
				  unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);
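
/*
 * Example (sketch; my_work and my_dwork are hypothetical):  the
 * schedule_*() helpers queue onto the system workqueue.
 *
 *	schedule_work(&my_work);
 *	schedule_delayed_work(&my_dwork, 5 * HZ);
 */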

int execute_in_process_context(work_func_t fn, struct execute_work *);
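
/*
 * Example (sketch; my_ew and my_fn are hypothetical):  run my_fn()
 * directly when already in process context, otherwise defer it via the
 * supplied execute_work item.
 *
 *	static struct execute_work my_ew;
 *
 *	execute_in_process_context(my_fn, &my_ew);
 */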

extern bool flush_work(struct work_struct *work);
extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool flush_delayed_work_sync(struct delayed_work *work);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);
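
/*
 * Example (sketch; dev->work is a hypothetical work item):  work_busy()
 * returns a mask of the WORK_BUSY_* bits defined above.
 *
 *	unsigned int busy = work_busy(&dev->work);
 *
 *	if (busy & WORK_BUSY_RUNNING)
 *		...
 */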

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline bool cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
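
/*
 * Example (sketch; dev->dwork is hypothetical):
 *
 *	cancel_delayed_work(&dev->dwork);
 *
 * When the handler must not be running after the call, use
 * cancel_delayed_work_sync() instead.
 */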

/*
 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline bool __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
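
/*
 * Example (sketch; read_msr_on_cpu and the msr argument are
 * hypothetical):  run a function synchronously on a specific CPU and
 * collect its return value.
 *
 *	long ret = work_on_cpu(2, read_msr_on_cpu, &msr);
 */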

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif