/* SPDX-License-Identifier: GPL-2.0 */
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

struct workqueue_struct;

struct work_struct;
/* prototype every work item callback must match */
typedef void (*work_func_t)(struct work_struct *work);

/* timer callback used by delayed_work->timer to queue ->work */
void delayed_work_timer_fn(struct timer_list *t);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

29 30
enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
31
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
32
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
33
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
34
#ifdef CONFIG_DEBUG_OBJECTS_WORK
35 36
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
T
Tejun Heo 已提交
37
#else
38
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
39 40
#endif

41 42
	WORK_STRUCT_COLOR_BITS	= 4,

43
	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
44
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
45
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
46
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
47 48 49 50 51 52
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

53 54 55 56 57 58 59
	/*
	 * The last color is no color used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

60
	/* not bound to any CPU, prefer the local CPU */
61
	WORK_CPU_UNBOUND	= NR_CPUS,
62

63
	/*
64 65 66
	 * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
	 * This makes pwqs aligned to 256 bytes and allows 15 workqueue
	 * flush colors.
67 68 69 70
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

71
	/* data contains off-queue information when !WORK_STRUCT_PWQ */
72
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,
73

74 75
	__WORK_OFFQ_CANCELING	= WORK_OFFQ_FLAG_BASE,
	WORK_OFFQ_CANCELING	= (1 << __WORK_OFFQ_CANCELING),
76

77 78
	/*
	 * When a work item is off queue, its high bits point to the last
79 80
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
81
	 */
82
	WORK_OFFQ_FLAG_BITS	= 1,
83 84 85 86
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,
87 88

	/* convenience constants */
T
Tejun Heo 已提交
89
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
90
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
91
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
92 93 94 95

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
96 97 98

	/* maximum string length for set_worker_desc() */
	WORKER_DESC_LEN		= 24,
99 100
};

L
Linus Torvalds 已提交
101
struct work_struct {
102
	atomic_long_t data;
L
Linus Torvalds 已提交
103
	struct list_head entry;
104
	work_func_t func;
105 106 107
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
108 109
};

/* initial ->data values: no pool associated; STATIC marks static initializers */
#define WORK_DATA_INIT()	ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))

114 115
struct delayed_work {
	struct work_struct work;
L
Linus Torvalds 已提交
116
	struct timer_list timer;
117 118 119

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
120
	int cpu;
L
Linus Torvalds 已提交
121 122
};

123 124
/**
 * struct workqueue_attrs - A struct for workqueue attributes.
125
 *
126
 * This can be used to change attributes of an unbound workqueue.
T
Tejun Heo 已提交
127 128
 */
struct workqueue_attrs {
129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146
	/**
	 * @nice: nice level
	 */
	int nice;

	/**
	 * @cpumask: allowed CPUs
	 */
	cpumask_var_t cpumask;

	/**
	 * @no_numa: disable NUMA affinity
	 *
	 * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
	 * only modifies how :c:func:`apply_workqueue_attrs` select pools and thus
	 * doesn't participate in pool hash calculations or equality comparisons.
	 */
	bool no_numa;
T
Tejun Heo 已提交
147 148
};

149 150 151 152 153
/**
 * to_delayed_work - obtain the containing delayed_work
 * @work: the &struct work_struct embedded in a &struct delayed_work
 *
 * Maps a work_struct back to its enclosing delayed_work via container_of().
 */
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

154 155 156 157
/* wrapper passed to execute_in_process_context() below */
struct execute_work {
	struct work_struct work;
};

158 159 160 161 162 163 164 165 166 167 168 169
#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

/* static initializer for a work_struct; see DECLARE_WORK() */
#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

/* static initializer for a delayed_work; see DECLARE_DELAYED_WORK() */
#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)delayed_work_timer_fn,\
				     (tflags) | TIMER_IRQSAFE),		\
	}

/* declare and statically initialize work items */
#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)

/*
 * Debugobjects hooks for on-stack work items; all of them compile to
 * no-ops when CONFIG_DEBUG_OBJECTS_WORK is off.
 */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
extern void destroy_delayed_work_on_stack(struct delayed_work *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif

/* runtime initializers; _ONSTACK variants pair with destroy_*_on_stack() */
#define INIT_WORK(_work, _func)						\
	__INIT_WORK((_work), (_func), 0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	__INIT_WORK((_work), (_func), 1)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__init_timer(&(_work)->timer,				\
			     delayed_work_timer_fn,			\
			     (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__init_timer_on_stack(&(_work)->timer,			\
				      delayed_work_timer_fn,		\
				      (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/core-api/workqueue.rst.
 */
enum {
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */

	/*
	 * Per-cpu workqueues are generally preferred because they tend to
	 * show better performance thanks to cache locality.  Per-cpu
	 * workqueues exclude the scheduler from choosing the CPU to
	 * execute the worker threads, which has an unfortunate side effect
	 * of increasing power consumption.
	 *
	 * The scheduler considers a CPU idle if it doesn't have any task
	 * to execute and tries to keep idle cores idle to conserve power;
	 * however, for example, a per-cpu work item scheduled from an
	 * interrupt handler on an idle CPU will force the scheduler to
	 * execute the work item on that CPU breaking the idleness, which in
	 * turn may lead to more scheduling choices which are sub-optimal
	 * in terms of power consumption.
	 *
	 * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
	 * but become unbound if workqueue.power_efficient kernel param is
	 * specified.  Per-cpu workqueues which are identified to
	 * contribute significantly to power-consumption are identified and
	 * marked with this flag and enabling the power_efficient mode
	 * leads to noticeable power saving at the cost of small
	 * performance disadvantage.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1480396
	 */
	WQ_POWER_EFFICIENT	= 1 << 7,

	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};
332

/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
336

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_highpri_wq is similar to system_wq but for work items which
 * require WQ_HIGHPRI.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as max_active limit is not reached and
 * resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * *_power_efficient_wq are inclined towards saving power and converted
 * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
 * they are same as their non-power-efficient counterparts - e.g.
 * system_power_efficient_wq is identical to system_wq if
 * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
372

373
extern struct workqueue_struct *
374 375
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
376

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args...: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to
 * Documentation/core-api/workqueue.rst.
 *
 * The __lock_name macro dance is to guarantee that single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	__lock_name = "(wq_completion)"#fmt#args;			\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif

/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args...: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

/* creation helpers for the old create_*workqueue() interface (__WQ_LEGACY) */
#define create_workqueue(name)						\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name)				\
	alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND |	\
			WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name)				\
	alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
L
Linus Torvalds 已提交
435 436 437

extern void destroy_workqueue(struct workqueue_struct *wq);

T
Tejun Heo 已提交
438 439
struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
440 441
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
442
int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
T
Tejun Heo 已提交
443

444
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
445
			struct work_struct *work);
446
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
447
			struct delayed_work *work, unsigned long delay);
448 449
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
450

451
extern void flush_workqueue(struct workqueue_struct *wq);
452
extern void drain_workqueue(struct workqueue_struct *wq);
L
Linus Torvalds 已提交
453

454
extern int schedule_on_each_cpu(work_func_t func);
L
Linus Torvalds 已提交
455

456
int execute_in_process_context(work_func_t fn, struct execute_work *);
L
Linus Torvalds 已提交
457

458
extern bool flush_work(struct work_struct *work);
J
Jens Axboe 已提交
459
extern bool cancel_work(struct work_struct *work);
460 461 462
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
463
extern bool cancel_delayed_work(struct delayed_work *dwork);
464
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
465

466 467
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
468
extern bool current_is_workqueue_rescuer(void);
469
extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
470
extern unsigned int work_busy(struct work_struct *work);
471 472
extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
extern void print_worker_info(const char *log_lvl, struct task_struct *task);
473
extern void show_workqueue_state(void);
474

T
Tejun Heo 已提交
475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Thin wrapper around queue_work_on() passing WORK_CPU_UNBOUND: the
 * work is queued on the submitting CPU, but may be processed by
 * another CPU if that one goes away.
 *
 * Returns %false if @work was already on a queue, %true otherwise.
 */
static inline bool queue_work(struct workqueue_struct *wq,
			      struct work_struct *work)
{
	bool queued;

	queued = queue_work_on(WORK_CPU_UNBOUND, wq, work);
	return queued;
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Same as queue_delayed_work_on() with WORK_CPU_UNBOUND, i.e. the
 * local CPU is preferred.
 */
static inline bool queue_delayed_work(struct workqueue_struct *wq,
				      struct delayed_work *dwork,
				      unsigned long delay)
{
	bool queued;

	queued = queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
	return queued;
}

/**
 * mod_delayed_work - modify delay of or queue a delayed work
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Shorthand for mod_delayed_work_on() with WORK_CPU_UNBOUND (local
 * CPU preferred).
 */
static inline bool mod_delayed_work(struct workqueue_struct *wq,
				    struct delayed_work *dwork,
				    unsigned long delay)
{
	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
}

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * Queues @work on the kernel-global workqueue (system_wq), targeting
 * the given @cpu.
 */
static inline bool schedule_work_on(int cpu, struct work_struct *work)
{
	bool queued;

	queued = queue_work_on(cpu, system_wq, work);
	return queued;
}

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Queues @work on the kernel-global workqueue (system_wq) if it is not
 * already queued; an already-queued item keeps its position.
 *
 * Returns %false if @work was already on the kernel-global workqueue
 * and %true otherwise.
 */
static inline bool schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * (Only system_wq, the kernel-global workqueue, is flushed; other
 * workqueues are unaffected.)
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
static inline void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

T
Tejun Heo 已提交
578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * Queues @dwork on the kernel-global workqueue (system_wq), targeting
 * @cpu, once @delay jiffies have passed.
 */
static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
					    unsigned long delay)
{
	bool queued;

	queued = queue_delayed_work_on(cpu, system_wq, dwork, delay);
	return queued;
}

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * Queues @dwork on the kernel-global workqueue (system_wq) once
 * @delay jiffies have passed.
 */
static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}

/*
 * work_on_cpu[_safe]() run @fn on the given CPU; on !SMP there is only
 * one CPU, so both collapse to a direct call.
 */
#ifndef CONFIG_SMP
static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
620

621 622 623 624 625 626
/* hooks for freezing/thawing freezable workqueues (see WQ_FREEZABLE) */
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

627 628 629 630 631 632 633
/* sysfs registration for workqueues; stub always succeeds w/o CONFIG_SYSFS */
#ifdef CONFIG_SYSFS
int workqueue_sysfs_register(struct workqueue_struct *wq);
#else	/* CONFIG_SYSFS */
static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
{ return 0; }
#endif	/* CONFIG_SYSFS */

T
Tejun Heo 已提交
634 635 636 637 638 639
/* touch hook for the workqueue watchdog; no-op when not configured */
#ifdef CONFIG_WQ_WATCHDOG
void wq_watchdog_touch(int cpu);
#else	/* CONFIG_WQ_WATCHDOG */
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

640 641 642 643 644 645
/* CPU hotplug callbacks: prepare/online/offline stages */
#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

646 647 648
/* two-stage boot-time initialization of the workqueue subsystem */
int __init workqueue_init_early(void);
int __init workqueue_init(void);

L
Linus Torvalds 已提交
649
#endif