/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */

	int freezeable;		/* Freeze the thread during suspend */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list; 	/* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}

static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new, old, res;

	/* assume the pending flag is already set and that the task has already
	 * been queued on this workqueue */
	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	res = work->management;
	if (res != new) {
		do {
			old = res;
			new = (unsigned long) wq;
			new |= (old & WORK_STRUCT_FLAG_MASK);
			res = cmpxchg(&work->management, old, new);
		} while (res != old);
	}
}

static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	set_wq_data(work, cwq);
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
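
/*
 * Minimal usage sketch (the names example_wq, example_work and
 * example_func below are hypothetical): the handler receives a pointer
 * to the work_struct itself and runs in process context.
 *
 *	static struct workqueue_struct *example_wq;
 *
 *	static void example_func(struct work_struct *work)
 *	{
 *		printk(KERN_INFO "example_func: running in process context\n");
 *	}
 *	static DECLARE_WORK(example_work, example_func);
 *
 *	example_wq = create_workqueue("example");
 *	if (example_wq)
 *		queue_work(example_wq, &example_work);
 */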

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
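
/*
 * Minimal usage sketch for the delayed variant (example_wq, example_dwork
 * and example_func are hypothetical): the delay is given in jiffies, and
 * the handler still takes the embedded work_struct.
 *
 *	static void example_func(struct work_struct *work)
 *	{
 *		printk(KERN_INFO "ran roughly one second after queueing\n");
 *	}
 *	static DECLARE_DELAYED_WORK(example_dwork, example_func);
 *
 *	queue_delayed_work(example_wq, &example_dwork, msecs_to_jiffies(1000));
 */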

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
			work_release(work);
		f(work);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to ensure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu, int freezeable)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	cwq->freezeable = freezeable;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu, freezeable);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
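
/*
 * Typical lifecycle sketch (my_wq and my_work are hypothetical): create
 * the queue at init time, queue work while running, then flush and
 * destroy it on teardown.  destroy_workqueue() flushes internally, so an
 * explicit flush_workqueue() is only needed when pending work must finish
 * before other teardown steps.
 *
 *	my_wq = create_singlethread_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(my_wq, &my_work);
 *	...
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */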

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
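
/*
 * Sketch of the common pattern of embedding the work_struct in a larger
 * object and using the kernel-global queue (struct my_dev and my_dev_work
 * are hypothetical): the handler recovers the enclosing object with
 * container_of().
 *
 *	struct my_dev {
 *		struct work_struct work;
 *		int pending_events;
 *	};
 *
 *	static void my_dev_work(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev, work);
 *
 *		dev->pending_events = 0;
 *	}
 *
 *	INIT_WORK(&dev->work, my_dev_work);
 *	schedule_work(&dev->work);
 */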

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
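
/*
 * Usage sketch (drain_caches is hypothetical): the same handler is run
 * once on every online CPU, and the call only returns after all of the
 * per-cpu work items have completed.
 *
 *	static void drain_caches(struct work_struct *unused)
 *	{
 *		... per-cpu cleanup ...
 *	}
 *
 *	if (schedule_on_each_cpu(drain_caches))
 *		printk(KERN_WARNING "drain_caches could not be scheduled\n");
 */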

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
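
/*
 * Sketch of a self-rearming delayed work and its teardown (poll_dwork and
 * poll_func are hypothetical): a single cancel_delayed_work() can return 0
 * when the timer has already fired and the handler then rearms the work,
 * so cancel_rearming_delayed_work() keeps flushing until a cancel sticks.
 *
 *	static void poll_func(struct work_struct *work)
 *	{
 *		... poll the hardware ...
 *		schedule_delayed_work(&poll_dwork, HZ);
 *	}
 *	static DECLARE_DELAYED_WORK(poll_dwork, poll_func);
 *
 *	schedule_delayed_work(&poll_dwork, HZ);
 *	...
 *	cancel_rearming_delayed_work(&poll_dwork);
 */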

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
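
/*
 * Usage sketch (my_release and my_ew are hypothetical): a caller that may
 * be in either interrupt or process context passes a work handler plus
 * execute_work storage; the storage must remain valid until the handler
 * has actually run, since it may be queued rather than called directly.
 *
 *	static struct execute_work my_ew;
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		... final cleanup that needs process context ...
 *	}
 *
 *	execute_in_process_context(my_release, &my_ew);
 */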

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next,struct work_struct,entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu, 0)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}