/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

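/*
 * Editorial note, not part of the original file: set_wq_data() and
 * get_wq_data() above multiplex work->data between flag bits and a
 * cpu_workqueue_struct pointer.  A rough sketch of the layout, which
 * assumes the pointer is aligned enough that its low bits are free
 * for the WORK_STRUCT_* flags:
 *
 *	bit 0:      WORK_STRUCT_PENDING
 *	low bits:   the rest of WORK_STRUCT_FLAG_MASK
 *	high bits:  the cwq pointer, recovered by masking with
 *	            WORK_STRUCT_WQ_DATA_MASK
 */
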
static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

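/*
 * Illustrative sketch, not part of the original file: how a driver
 * might feed work to a private queue.  The names my_wq, my_work and
 * my_work_handler are hypothetical.
 */
#if 0
static void my_work_handler(struct work_struct *unused)
{
	/* Runs later in a worker thread; process context, may sleep. */
}

static DECLARE_WORK(my_work, my_work_handler);

static void my_irq_tail(struct workqueue_struct *my_wq)
{
	/* Safe from atomic context; returns 0 if already pending. */
	queue_work(my_wq, &my_work);
}
#endif
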
void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

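/*
 * Illustrative sketch, not part of the original file: a polling
 * pattern built on queue_delayed_work().  my_wq, my_poll and
 * my_poll_fn are made-up names.
 */
#if 0
static void my_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);

static void my_start_polling(struct workqueue_struct *my_wq)
{
	/* Run my_poll_fn() on my_wq roughly one second from now. */
	queue_delayed_work(my_wq, &my_poll, HZ);
}
#endif
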
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
				       	current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) && !cwq->should_stop
		    && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

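/*
 * Illustrative sketch, not part of the original file: a typical
 * shutdown-path use of flush_workqueue().  my_wq and my_dev_stop
 * are hypothetical.
 */
#if 0
static void my_dev_stop(struct workqueue_struct *my_wq)
{
	/* Stop queueing new work first, then drain what is pending; */
	/* nothing queued before this call is still running after it. */
	flush_workqueue(my_wq);
}
#endif
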
static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * cancel_work_sync() will attempt to cancel the work if it is queued. If the
 * work's callback appears to be running, cancel_work_sync() will block until
 * it has completed.
 *
 * cancel_work_sync() is designed to be used when the caller is tearing down
 * data structures which the callback function operates upon. It is expected
 * that, prior to calling cancel_work_sync(), the caller has arranged for the
 * work to not be requeued.
 */
void cancel_work_sync(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_clear_pending(work);
	spin_unlock_irq(&cwq->lock);

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
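
/*
 * Illustrative sketch, not part of the original file: typical teardown
 * ordering around cancel_work_sync().  struct my_dev and its users
 * are hypothetical.
 */
#if 0
struct my_dev {
	struct work_struct work;
	int stopping;		/* checked by the requeueing paths */
};

static void my_dev_teardown(struct my_dev *dev)
{
	/* First arrange that nothing requeues dev->work... */
	dev->stopping = 1;
	/* ...then cancel it and wait out any running callback. */
	cancel_work_sync(&dev->work);
	kfree(dev);
}
#endif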


static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
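
/*
 * Illustrative sketch, not part of the original file: deferring a
 * small job to keventd via schedule_work().  my_event_fn/my_event
 * are hypothetical.
 */
#if 0
static void my_event_fn(struct work_struct *unused)
{
	/* Runs later in process context on the shared "events" queue. */
}

static DECLARE_WORK(my_event, my_event_fn);

static void my_notify(void)
{
	schedule_work(&my_event);
}
#endif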

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}

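/*
 * Illustrative sketch, not part of the original file: running a
 * function once on every online CPU.  my_flush_local/my_flush_all
 * are hypothetical.
 */
#if 0
static void my_flush_local(struct work_struct *unused)
{
	/* Executes in keventd context on each CPU in turn, e.g. to
	 * drain per-CPU caches. */
}

static int my_flush_all(void)
{
	return schedule_on_each_cpu(my_flush_local);	/* may sleep */
}
#endif
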
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or cancel_work_sync() to wait
 * on it.
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	/* Was it ever queued ? */
	if (cwq != NULL) {
		struct workqueue_struct *wq = cwq->wq;

		while (!cancel_delayed_work(dwork))
			flush_workqueue(wq);
	}
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

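/*
 * Illustrative sketch, not part of the original file: the self-rearming
 * pattern that cancel_rearming_delayed_work() is meant to stop.
 * my_poll_work/my_rearming_fn are hypothetical.
 */
#if 0
static struct delayed_work my_poll_work;

static void my_rearming_fn(struct work_struct *work)
{
	/* ... do the periodic job, then rearm ... */
	schedule_delayed_work(&my_poll_work, HZ);
}

static void my_stop_polling(void)
{
	/* Wins the race against the rearm in my_rearming_fn(). */
	cancel_rearming_delayed_work(&my_poll_work);
}
#endif
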
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

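/*
 * Illustrative sketch, not part of the original file: a release path
 * that may be entered from either process or interrupt context.
 * struct my_obj, my_release and my_put are hypothetical.
 */
#if 0
struct my_obj {
	struct execute_work ew;	/* must stay valid until the callback runs */
};

static void my_release(struct work_struct *work)
{
	kfree(container_of(work, struct my_obj, ew.work));
}

static void my_put(struct my_obj *obj)
{
	/* Frees immediately when possible, otherwise via keventd. */
	execute_in_process_context(my_release, &obj->ew);
}
#endif
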
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
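
/*
 * Illustrative sketch, not part of the original file: creating and
 * destroying a private queue through the create_workqueue() family
 * of wrappers in linux/workqueue.h.  my_wq is hypothetical.
 */
#if 0
static struct workqueue_struct *my_wq;

static int my_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	return my_wq ? 0 : -ENOMEM;
}

static void my_exit(void)
{
	destroy_workqueue(my_wq);	/* waits for pending work first */
}
#endif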

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		smp_rmb();
		spin_unlock_wait(&cwq->lock);
	}
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}