/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_LINKED_BIT	= 1,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 2,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 3,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 2,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is "no color", used for work items which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/*
	 * Reserve 6 bits off of cwq pointer w/ debugobjects turned
	 * off.  This makes cwqs aligned to 64 bytes which isn't too
	 * excessive while allowing 15 workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= NR_CPUS << WORK_STRUCT_FLAG_BITS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};

struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct execute_work {
	struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
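
/*
 * A minimal usage sketch of the static initializers above; the
 * my_work_fn/my_work/my_dwork names are hypothetical and only
 * illustrate the pattern:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);
 */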

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ON_STACK(_work, _func)			\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
	do {							\
		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)
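
/*
 * Runtime initialization sketch for a work item embedded in a larger
 * object; struct my_dev, my_refresh_fn and the surrounding driver code
 * are hypothetical:
 *
 *	struct my_dev {
 *		struct work_struct refresh_work;
 *	};
 *
 *	static void my_refresh_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  refresh_work);
 *		pr_info("refreshing %p\n", dev);
 *	}
 *
 *	INIT_WORK(&dev->refresh_work, my_refresh_fn);
 */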

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @work: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

enum {
	WQ_FREEZEABLE		= 1 << 0, /* freeze during suspend */
	WQ_SINGLE_CPU		= 1 << 1, /* only single cpu at a time */
	WQ_NON_REENTRANT	= 1 << 2, /* guarantee non-reentrance */
	WQ_RESCUER		= 1 << 3, /* has a rescue worker */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
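
/*
 * Queueing sketch against the system workqueues above; my_work,
 * my_dwork and my_long_work are assumed to be initialized work items
 * (hypothetical names):
 *
 *	schedule_work(&my_work);
 *	schedule_delayed_work(&my_dwork, msecs_to_jiffies(100));
 *	queue_work(system_long_wq, &my_long_work);
 */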

extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
		      struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(name, flags, max_active)		\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__alloc_workqueue_key((name), (flags), (max_active),	\
			      &__key, __lock_name);		\
})
#else
#define alloc_workqueue(name, flags, max_active)		\
	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
#endif

#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_RESCUER, 1)
#define create_freezeable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1)

extern void destroy_workqueue(struct workqueue_struct *wq);
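
/*
 * Allocation sketch; "my_wq" and my_work are hypothetical.  A dedicated
 * workqueue is created, work is queued on it, and it is flushed and
 * destroyed again:
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_NON_REENTRANT, 1);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */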

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);

extern int flush_work(struct work_struct *work);
extern int cancel_work_sync(struct work_struct *work);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

/*
 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline int __cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}

extern int cancel_delayed_work_sync(struct delayed_work *work);
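
/*
 * Teardown sketch; my_work, my_dwork and dev are hypothetical.  The
 * _sync variants also wait for a running callback to finish, so the
 * containing object can be freed safely afterwards:
 *
 *	cancel_delayed_work_sync(&dev->my_dwork);
 *	cancel_work_sync(&dev->my_work);
 *	kfree(dev);
 */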

/* Obsolete. use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
					struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete. use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
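
/*
 * work_on_cpu() sketch: run a function synchronously on a specific CPU
 * and return its result; my_probe_fn and my_dev are hypothetical:
 *
 *	long ret = work_on_cpu(1, my_probe_fn, my_dev);
 */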

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif