/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
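
/*
 * Illustration: work->data packs the owning cpu_workqueue_struct pointer
 * together with the flag bits in a single word. A cpu_workqueue_struct is
 * cache-line aligned, so its low bits are zero and are free to carry the
 * flags:
 *
 *	data = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
 *	cwq  = (void *) (data & WORK_STRUCT_WQ_DATA_MASK);
 */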

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
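
/*
 * Illustrative usage sketch ("my_wq", "my_dev" and the handler are
 * hypothetical, not defined here):
 *
 *	static void my_handler(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *		... runs in the workqueue thread, in process context ...
 *	}
 *
 *	INIT_WORK(&dev->work, my_handler);
 *	queue_work(my_wq, &dev->work);
 */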

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	timer_stats_timer_set_start_info(timer);
	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work,
			per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
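
/*
 * Illustrative usage sketch (hypothetical names): run a handler on "my_wq"
 * roughly two seconds from now.
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 *	queue_delayed_work(my_wq, &my_dwork, 2 * HZ);
 */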

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work,
			per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
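
/*
 * Usage mirrors queue_delayed_work() with an explicit target CPU, e.g.
 * queue_delayed_work_on(cpu, my_wq, &my_dwork, HZ) to run the handler on
 * @cpu about one second from now (hypothetical names as above).
 */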

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	for (;;) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!cwq->should_stop && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}
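
/*
 * This is the flushing primitive in miniature: a dummy barrier work is
 * inserted behind (tail == 1) or ahead of (tail == 0) the items of
 * interest, and the flusher sleeps on barr->done. When the worker thread
 * executes the barrier, wq_barrier_func() completes barr->done and wakes
 * the flusher; every work queued before the barrier has run by then.
 */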

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
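
/*
 * Illustrative usage sketch (hypothetical names): a driver shutdown path
 * that lets all queued handlers finish before the device goes away.
 *
 *	static void my_shutdown(struct my_dev *dev)
 *	{
 *		... prevent new work from being queued ...
 *		flush_workqueue(my_wq);
 *		... now no handler is pending or running ...
 *	}
 */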

static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_release(work);
	spin_unlock_irq(&cwq->lock);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(flush_work);
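
/*
 * Illustrative usage sketch (hypothetical names): free a structure only
 * once its handler can no longer be running. The caller first arranges
 * that the work will not be requeued, as required above:
 *
 *	dev->shutting_down = 1;
 *	flush_work(my_wq, &dev->work);
 *	kfree(dev);
 */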

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
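
/*
 * Illustrative usage sketch (hypothetical names): defer non-urgent
 * processing from an interrupt handler into process context.
 *
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	static irqreturn_t my_irq(int irq, void *dev_id)
 *	{
 *		schedule_work(&my_work);
 *		return IRQ_HANDLED;
 *	}
 */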

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
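
/*
 * Illustrative usage (hypothetical name): schedule_delayed_work(&my_dwork,
 * HZ / 10) runs the handler on the global workqueue roughly 100ms from now.
 */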

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
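
/*
 * Illustrative usage sketch (hypothetical function): drain a per-CPU cache
 * on every online CPU and sleep until each CPU has finished.
 *
 *	static void drain_local_cache(struct work_struct *unused)
 *	{
 *		... operate on this CPU's private state ...
 *	}
 *
 *	schedule_on_each_cpu(drain_local_cache);
 */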

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	/* Was it ever queued ? */
	if (!get_wq_data(&dwork->work))
		return;

	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
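
/*
 * Illustrative usage sketch (hypothetical names): a self-rearming poll
 * handler and the teardown that reliably stops it.
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct my_dev *dev =
 *			container_of(work, struct my_dev, dwork.work);
 *		... poll the hardware ...
 *		queue_delayed_work(my_wq, &dev->dwork, HZ);
 *	}
 *
 *	cancel_rearming_delayed_workqueue(my_wq, &dev->dwork);
 */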

/**
 * cancel_rearming_delayed_work - kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
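
/*
 * Illustrative usage (hypothetical names): a release path that may be
 * entered from either process or interrupt context can embed a struct
 * execute_work in the object and call:
 *
 *	execute_in_process_context(my_release, &dev->ew);
 */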

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
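
/*
 * Callers normally use the wrappers from <linux/workqueue.h> rather than
 * calling this directly, e.g.:
 *
 *	struct workqueue_struct *wq = create_workqueue("my_wq");
 *	struct workqueue_struct *st = create_singlethread_workqueue("my_st");
 *
 * which expand to __create_workqueue() with the singlethread and
 * freezeable arguments filled in ("my_wq"/"my_st" are hypothetical names).
 */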

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
			/* fall through */
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}