/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
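
/*
 * Usage sketch (illustrative only; my_loop() and do_work() are made-up
 * names, not part of this file): a typical thread function polls
 * kthread_should_stop() in its main loop and returns once it flips to
 * true, and the return value is then passed back to kthread_stop():
 *
 *	static int my_loop(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */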

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
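
/*
 * Usage sketch (illustrative; do_work() is a made-up name): a freezable
 * kthread marks itself freezable once and then tests
 * kthread_freezable_should_stop() instead of open-coding try_to_freeze(),
 * avoiding the kthread_stop() / freezer deadlock mentioned above:
 *
 *	set_freezable();
 *	while (!kthread_freezable_should_stop(NULL)) {
 *		do_work();
 *		schedule_timeout_interruptible(HZ);
 *	}
 */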

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		set_current_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

void kthread_park_complete(struct task_struct *k)
{
	complete_all(&to_kthread(k)->parked);
}

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
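
/*
 * Usage sketch (illustrative; my_loop() is a made-up thread function):
 * create the thread, start it explicitly, and stop it later:
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_loop, NULL, NUMA_NO_NODE, "my_loop");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 *
 * kthread_run() in <linux/kthread.h> wraps the create + wake pair.
 */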

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug need to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it. If the
 * thread is marked percpu then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	reinit_completion(&kthread->parked);
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		wait_for_completion(&kthread->parked);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
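
/*
 * Usage sketch (illustrative; my_thread_fn() and do_work() are made-up
 * names): a thread function cooperates with kthread_park()/kthread_unpark()
 * by parking itself at a point where it holds no resources:
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				do_work(data);
 *		}
 *		return 0;
 *	}
 *
 * A controller calls kthread_park(tsk) before e.g. taking a CPU down and
 * kthread_unpark(tsk) once it is back online.
 */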

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);
	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Works are not allowed to hold any locks or keep preemption or interrupts
 * disabled when they finish. There is a safe point for freezing after one
 * work finishes and before the next one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = -1;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);
	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution on @worker.  @worker must have been
 * created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
779 780 781
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
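
/*
 * Usage sketch (illustrative; my_work_fn() is a made-up callback and
 * worker is assumed to come from kthread_create_worker() above):
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		... process one item ...
 *	}
 *	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *
 *	kthread_queue_work(worker, &my_work);
 *	kthread_flush_work(&my_work);
 */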
/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It is called from an irqsafe timer with interrupts already disabled.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is being used the wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
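
/*
 * Usage sketch (illustrative, reusing the made-up my_work_fn() from the
 * sketch above): DEFINE_KTHREAD_DELAYED_WORK() from <linux/kthread.h>
 * initializes dwork.timer with kthread_delayed_work_timer_fn, which the
 * WARN_ON_ONCE() in __kthread_queue_delayed_work() insists on:
 *
 *	static DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_work_fn);
 *
 *	kthread_queue_delayed_work(worker, &my_dwork, HZ);
 */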

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;
	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);
	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;
	spin_unlock_irq(&worker->lock);
	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);
/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * worker's current_work for the work it is currently processing.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporarily released
		 * to avoid a deadlock with the callback. In the meantime,
		 * any queuing is blocked by setting the canceling counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
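
/*
 * Usage sketch (illustrative; my_worker and do_periodic_work() are made-up
 * names): a periodic work can re-arm itself from its own callback with
 * kthread_mod_delayed_work():
 *
 *	static void my_tick_fn(struct kthread_work *work)
 *	{
 *		struct kthread_delayed_work *dwork =
 *			container_of(work, struct kthread_delayed_work, work);
 *
 *		do_periodic_work();
 *		kthread_mod_delayed_work(my_worker, dwork, HZ);
 *	}
 */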

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_works. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
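
/*
 * Usage sketch (illustrative): the intended lifecycle, from
 * kthread_create_worker() to kthread_destroy_worker(), which stops the
 * worker task and frees the worker:
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_queue_work(worker, &my_work);
 *	...
 *	kthread_destroy_worker(worker);
 */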
#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This
 * function stores the original thread's cgroup info in the current kthread
 * context for later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif