/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
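
/*
 * Usage sketch (illustrative only, not part of this file; "my_wq",
 * "my_work" and my_work_fn() are made-up names):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		... runs in process context and may sleep ...
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);	(my_wq from create_workqueue())
 */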

void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
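
/*
 * Usage sketch (illustrative only; "my_wq", "my_dwork" and my_timeout_fn()
 * are made-up names): run my_timeout_fn() on my_wq roughly one second from
 * now.
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(1000));
 */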

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
				       	current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
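
/*
 * Usage sketch (illustrative only; "my_wq", "my_work" and my_shared_state
 * are made-up names): make sure everything queued so far has finished
 * before releasing resources the handlers might still touch.
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	flush_workqueue(my_wq);		(may sleep: process context only)
 *	kfree(my_shared_state);
 */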

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
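
/*
 * Usage sketch (illustrative only; "dev", "reset_work" and reset_work_fn()
 * are made-up names): typical teardown order is to stop new submissions
 * first, then cancel synchronously.
 *
 *	dev->shutting_down = 1;
 *	cancel_work_sync(&dev->reset_work);
 *	(after this returns, reset_work_fn() is neither queued nor running)
 */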

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
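
/*
 * Usage sketch (illustrative only; "poll_dwork" and poll_fn() are made-up
 * names): kill a polling handler that re-arms itself with
 * queue_delayed_work().
 *
 *	static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);
 *
 *	cancel_delayed_work_sync(&poll_dwork);
 *	(both the pending timer and a running poll_fn() are dealt with)
 */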

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
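
/*
 * Usage sketch (illustrative only; my_irq_handler(), my_bh_fn() and
 * "my_bh_work" are made-up names): defer the part of interrupt handling
 * that may sleep to keventd.
 *
 *	static DECLARE_WORK(my_bh_work, my_bh_fn);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		schedule_work(&my_bh_work);	(safe from atomic context)
 *		return IRQ_HANDLED;
 *	}
 */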

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
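
/*
 * Usage sketch (illustrative only; "my_poll_dwork" and my_poll_fn() are
 * made-up names): a periodic poll on the kernel-global queue that re-arms
 * itself from its own handler.
 *
 *	static void my_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll_dwork, my_poll_fn);
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		... do one poll, then re-arm ...
 *		schedule_delayed_work(&my_poll_dwork, HZ);
 *	}
 */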

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
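
/*
 * Usage sketch (illustrative only; drain_local_caches() is a made-up
 * handler): run a function once on every online CPU and wait for all of
 * them via the flush_workqueue() above.
 *
 *	static void drain_local_caches(struct work_struct *unused)
 *	{
 *		... operates on this CPU's per-cpu data ...
 *	}
 *
 *	err = schedule_on_each_cpu(drain_local_caches);
 */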

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
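
/*
 * Usage sketch (illustrative only; my_release_fn() and "release_ew" are
 * made-up names): the caller may or may not be in interrupt context, so
 * let this helper decide between calling the function directly and
 * bouncing it through keventd. @ew must stay valid until the work runs.
 *
 *	execute_in_process_context(my_release_fn, &dev->release_ew);
 */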

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
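
/*
 * Usage sketch (illustrative only; "my_wq", "my_work" and "mydrv" are
 * made-up names): drivers normally go through the create_workqueue() or
 * create_singlethread_workqueue() wrappers rather than calling
 * __create_workqueue() directly.
 *
 *	my_wq = create_singlethread_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);	(flushes pending work, then frees)
 */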

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD,
	 * workqueue_mutex protects cwq->thread
	 */
	if (cwq->thread == NULL)
		return;

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
834
			start_workqueue_thread(cwq, -1);
835 836 837 838
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}