/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on fact
	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

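/*
 * A minimal usage sketch (editorial addition, not part of this file): the
 * canonical loop of a thread function that cooperates with kthread_stop().
 * The names example_poll_fn and example_do_work() are hypothetical.
 */
#if 0	/* illustration only */
static int example_poll_fn(void *data)
{
	while (!kthread_should_stop()) {
		example_do_work(data);			/* hypothetical work item */
		schedule_timeout_interruptible(HZ);	/* sleep between passes */
	}
	return 0;	/* handed back as kthread_stop()'s return value */
}
#endif
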
/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme()
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

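/*
 * A minimal sketch (editorial addition): a loop that honours both stop and
 * park requests, the pattern used by smpboot-style per-CPU threads.
 * example_do_work() is hypothetical.
 */
#if 0	/* illustration only */
static int example_percpu_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* sleeps in TASK_PARKED until unparked */
		else
			example_do_work(data);
	}
	return 0;
}
#endif
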
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

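/*
 * A minimal sketch (editorial addition): a freezable kthread loop.  The
 * set_freezable() call is needed first because kthreads are exempt from
 * freezing by default.  example_do_work() is hypothetical.
 */
#if 0	/* illustration only */
static int example_freezable_fn(void *data)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("woke up from the refrigerator\n");
		example_do_work(data);
	}
	return 0;
}
#endif
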
/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

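/*
 * A one-line sketch (editorial addition): kthread_data() simply returns the
 * @data pointer given at creation, letting any holder of the task_struct
 * recover per-thread context.  struct example_ctx is hypothetical.
 */
#if 0	/* illustration only */
	struct example_ctx *ctx = kthread_data(task);	/* == @data from creation */
#endif
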
/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		set_current_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };

		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
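
/*
 * A usage sketch (editorial addition): the usual create/wake/stop life cycle.
 * The kthread_run() macro from <linux/kthread.h> wraps the first two steps.
 * example_poll_fn is hypothetical.
 */
#if 0	/* illustration only */
static void example_lifecycle(void)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(example_poll_fn, NULL, NUMA_NO_NODE,
				     "example/%d", 0);
	if (IS_ERR(tsk))
		return;
	wake_up_process(tsk);	/* the new thread starts out stopped */
	/* ... later ... */
	kthread_stop(tsk);	/* returns example_poll_fn()'s return value */
}
#endif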

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
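
/*
 * A usage sketch (editorial addition): binding must happen between creation
 * and the first wakeup, while the thread is still inactive.  example_fn and
 * cpu are hypothetical.
 */
#if 0	/* illustration only */
	tsk = kthread_create(example_fn, NULL, "pinned/%u", cpu);
	if (!IS_ERR(tsk)) {
		kthread_bind(tsk, cpu);		/* before wake_up_process() */
		wake_up_process(tsk);
	}
#endif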

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		/*
		 * Newly created kthread was parked when the CPU was offline.
		 * The binding was lost and we need to set it again.
		 */
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
		if (k != current) {
			wake_up_process(k);
			wait_for_completion(&kthread->parked);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
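
/*
 * A usage sketch (editorial addition): quiescing a thread across a critical
 * section.  example_reconfigure() is hypothetical.
 */
#if 0	/* illustration only */
	if (!kthread_park(tsk)) {
		/* tsk now sleeps in TASK_PARKED and touches no shared state */
		example_reconfigure();
		kthread_unpark(tsk);	/* the thread resumes in its threadfn */
	}
#endif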

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish. A safe point for freezing exists after one work finishes
 * and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = -1;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
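
/*
 * A usage sketch (editorial addition): a complete worker round trip.
 * example_work_fn is hypothetical.
 */
#if 0	/* illustration only */
static void example_work_fn(struct kthread_work *work)
{
	/* runs in the worker's thread context */
}

static void example_worker_usage(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	worker = kthread_create_worker(0, "example_helper");
	if (IS_ERR(worker))
		return;
	kthread_init_work(&work, example_work_fn);
	kthread_queue_work(worker, &work);
	kthread_flush_work(&work);		/* wait for it to run */
	kthread_destroy_worker(worker);
}
#endif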

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
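
/*
 * A usage sketch (editorial addition): a work item must be initialized before
 * queuing, and only ever queued on a single worker at a time.
 * example_work_fn is hypothetical.
 */
#if 0	/* illustration only */
	kthread_init_work(&work, example_work_fn);
	if (!kthread_queue_work(worker, &work))
		pr_debug("work was already pending\n");
#endif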

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
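
/*
 * A usage sketch (editorial addition): queue work to run roughly 100ms from
 * now.  example_work_fn is hypothetical.
 */
#if 0	/* illustration only */
	struct kthread_delayed_work dwork;

	kthread_init_delayed_work(&dwork, example_work_fn);
	kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(100));
#endif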

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporary released
		 * to avoid a deadlock with the callback. In the meantime,
		 * any queuing is blocked by setting the canceling counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
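
/*
 * A usage sketch (editorial addition): a watchdog-style pattern that keeps
 * pushing a pending timeout into the future; each call re-arms the timer
 * whether or not @dwork was already queued.
 */
#if 0	/* illustration only */
	kthread_mod_delayed_work(worker, &dwork, msecs_to_jiffies(500));
#endif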

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
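
/*
 * A usage sketch (editorial addition): teardown order for a self-requeueing
 * delayed work -- cancel synchronously first so nothing can re-arm the timer,
 * then the worker can be torn down safely.
 */
#if 0	/* illustration only */
	kthread_cancel_delayed_work_sync(&dwork);
	kthread_destroy_worker(worker);
#endif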

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
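
/*
 * A usage sketch (editorial addition): kthread_destroy_worker() flushes
 * before stopping, so a plain shutdown needs no explicit flush; only works
 * that re-queue themselves must be cancelled first.
 */
#if 0	/* illustration only */
	kthread_cancel_work_sync(&work);	/* stop any self-requeueing work */
	kthread_destroy_worker(worker);		/* flush, stop task, kfree worker */
#endif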

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach cgroup info of
 * original threads instead of that of current thread. This function stores
 * original thread's cgroup info in current kthread context for later
 * retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);
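
/*
 * A usage sketch (editorial addition): the loop-device-style pattern of
 * charging I/O issued by a helper kthread to the submitting task's blkcg.
 * bio and submitter_css are hypothetical.
 */
#if 0	/* illustration only */
	kthread_associate_blkcg(submitter_css);	/* adopt the submitter's blkcg */
	submit_bio(bio);			/* accounted to that blkcg */
	kthread_associate_blkcg(NULL);		/* drop the association */
#endif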

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif