/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list; 	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, so that CPU hotplug can add
   and remove worker threads as CPUs come and go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	work->wq_data = cwq;
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(0, &work->pending)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
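
/*
 * Usage sketch (illustrative only -- "my_wq", "my_work" and "my_handler"
 * are hypothetical names; my_wq would come from create_workqueue()):
 *
 *	static void my_handler(void *data)
 *	{
 *		... runs in process context, may sleep ...
 *	}
 *	static DECLARE_WORK(my_work, my_handler, NULL);
 *
 *	if (!queue_work(my_wq, &my_work))
 *		... my_work was already pending, nothing was queued ...
 */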

static void delayed_work_timer_fn(unsigned long __data)
{
	struct work_struct *work = (struct work_struct *)__data;
	struct workqueue_struct *wq = work->wq_data;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @work: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
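
/*
 * Usage sketch (illustrative; "my_wq" and "my_work" as above):
 *
 *	queue_delayed_work(my_wq, &my_work, HZ / 10);
 *
 * queues my_work roughly 100ms from now (at HZ ticks per second).  The
 * pending bit is set immediately, so a second call made before the timer
 * fires returns 0 and queues nothing.
 */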

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking work off the queue until done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		void (*f) (void *) = work->func;
		void *data = work->data;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(work->wq_data != cwq);
		clear_bit(0, &work->pending);
		f(data);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node-local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each cpu workqueue's current insert_sequence
 * number and will sleep until its remove_sequence is greater than or equal
 * to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
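
/*
 * Typical shutdown sequence (sketch, hypothetical names):
 *
 *	cancel_delayed_work(&my_work);	... stop a pending timer first ...
 *	flush_workqueue(my_wq);		... drain whatever is still queued ...
 *
 * Flushing alone cannot stop a handler that requeues itself; see
 * cancel_rearming_delayed_workqueue() below for that case.
 */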

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
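
/*
 * Callers normally use the create_workqueue()/create_singlethread_workqueue()
 * wrappers from <linux/workqueue.h>, which supply the singlethread argument.
 * Sketch (hypothetical driver name):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(my_wq);
 */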

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
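
/*
 * Sketch (hypothetical names): deferring the bottom half of an interrupt
 * handler to keventd, since the handler itself cannot sleep:
 *
 *	static DECLARE_WORK(my_bh_work, my_bh_handler, &my_dev);
 *
 *	... in the interrupt handler ...
 *	schedule_work(&my_bh_work);
 */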

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @work: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @work: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns a negative errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
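
/*
 * Sketch (hypothetical): draining a per-cpu cache on every online CPU,
 * waiting until all the handlers have run:
 *
 *	schedule_on_each_cpu(my_drain_pcpu_cache, NULL);
 */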

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct work_struct *work)
{
	while (!cancel_delayed_work(work))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
	cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
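
/*
 * Sketch of the self-rearming pattern these helpers are meant to stop
 * (hypothetical names):
 *
 *	static void my_poll(void *data)
 *	{
 *		...
 *		schedule_delayed_work(&my_poll_work, HZ);
 *	}
 *
 *	... on teardown ...
 *	cancel_rearming_delayed_work(&my_poll_work);
 */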

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @data:	data to pass to the function
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
			       struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(data);
		return 0;
	}

	INIT_WORK(&ew->work, fn, data);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
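
/*
 * Sketch (hypothetical): a release routine that may be entered from
 * interrupt context, where "ew" is a struct execute_work embedded in the
 * object and must stay valid until the handler has run:
 *
 *	execute_in_process_context(my_release, my_obj, &my_obj->ew);
 */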

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}