/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>

/*
 * The per-CPU workqueue (if the workqueue is single-threaded, we always
 * use the first possible CPU).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
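
/*
 * Worked example of the flush invariant above: if insert_sequence is 5
 * and remove_sequence is 3 when a flush samples the queue, the flusher
 * sleeps until remove_sequence reaches 5.  Works queued after the
 * sample push insert_sequence beyond 5 but cannot delay the flusher,
 * which is what prevents the livelock described above.
 */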

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list; 	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	work->wq_data = cwq;
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(0, &work->pending)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
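
/*
 * Example usage (a minimal sketch; "my_wq" and "my_handler" are
 * illustrative names, and the three-argument INIT_WORK() matches this
 * kernel generation, where handlers take a void * argument):
 *
 *	static void my_handler(void *data)
 *	{
 *		printk("my_handler ran\n");
 *	}
 *
 *	static struct work_struct my_work;
 *
 *	INIT_WORK(&my_work, my_handler, NULL);
 *	queue_work(my_wq, &my_work);
 */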

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = dwork->work.wq_data;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
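
/*
 * Example usage (a minimal sketch; "my_wq" is an illustrative name,
 * and the three-argument DECLARE_DELAYED_WORK() is assumed from this
 * generation's <linux/workqueue.h>):
 *
 *	static void my_timeout(void *data);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_timeout, NULL);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 *
 * my_timeout() then runs on my_wq roughly HZ jiffies (one second)
 * later.
 */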

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
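
/*
 * Example usage (a minimal sketch; illustrative names): pin the timer,
 * and therefore the handler, to CPU 0:
 *
 *	queue_delayed_work_on(0, my_wq, &my_dwork, 5 * HZ);
 */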

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking work off the queue until done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		void (*f) (void *) = work->func;
		void *data = work->data;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(work->wq_data != cwq);
		clear_bit(0, &work->pending);
		f(data);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node-local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each cpu workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
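
/*
 * Example of the shutdown pattern mentioned above (a minimal sketch;
 * "my_dev" and "my_wq" are illustrative names):
 *
 *	my_dev->stopped = 1;	(nothing queues new work from here on)
 *	flush_workqueue(my_wq);
 *	kfree(my_dev);		(all queued handlers have now run)
 */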

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
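
/*
 * Callers normally use the create_workqueue() and
 * create_singlethread_workqueue() wrappers from <linux/workqueue.h>,
 * which expand to __create_workqueue(name, 0) and
 * __create_workqueue(name, 1).  A minimal sketch (illustrative names):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 */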

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
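
/*
 * Example usage (a minimal sketch; illustrative names).  No private
 * workqueue is needed: the work runs on this CPU's "events" thread.
 *
 *	static void my_handler(void *data);
 *	static DECLARE_WORK(my_work, my_handler, NULL);
 *
 *	schedule_work(&my_work);
 */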

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
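
/*
 * Example usage (a minimal sketch; illustrative names): run a function
 * once on every online CPU and wait until all invocations finish:
 *
 *	static void bump(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	if (schedule_on_each_cpu(bump, &hits))
 *		printk("schedule_on_each_cpu failed\n");
 */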

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
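
/*
 * Example of the rearming pattern these helpers exist for (a minimal
 * sketch; illustrative names, three-argument DECLARE_DELAYED_WORK()
 * assumed):
 *
 *	static void my_poll(void *data);
 *	static DECLARE_DELAYED_WORK(my_poll_dwork, my_poll, NULL);
 *
 *	static void my_poll(void *data)
 *	{
 *		schedule_delayed_work(&my_poll_dwork, HZ);
 *	}
 *
 * A plain cancel_delayed_work() can lose the race against a handler
 * that is already running and about to requeue itself; the loop above
 * keeps cancelling and flushing until the cancel sticks.
 */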

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @data:	data to pass to the function
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
			       struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(data);
		return 0;
	}

	INIT_WORK(&ew->work, fn, data);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
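
/*
 * Example usage (a minimal sketch; illustrative names): a release path
 * that may be entered from interrupt context.  The execute_work
 * storage must stay valid until the handler runs, so it lives in the
 * object itself:
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *	};
 *
 *	static void my_release(void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	execute_in_process_context(my_release, obj, &obj->ew);
 */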

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}