/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the fact
	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
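
/*
 * Example (illustrative sketch): the canonical shape of a thread function
 * that honours kthread_should_stop().  The name example_stop_fn is
 * hypothetical.
 */
static int __maybe_unused example_stop_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... do one unit of work on @data ... */
		schedule_timeout_interruptible(HZ);
	}
	/* This return value is handed back to kthread_stop(). */
	return 0;
}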

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme()
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
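
/*
 * Example (illustrative sketch): a freezable kthread main loop built on
 * kthread_freezable_should_stop().  The name example_freezable_fn is
 * hypothetical.
 */
static int __maybe_unused example_freezable_fn(void *data)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		/* the refrigerator is entered inside the check above */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}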

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions on
		 * task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_task_inactive() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		complete(&self->parked);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);
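
/*
 * Example (illustrative sketch): a thread function that cooperates with
 * kthread_park()/kthread_unpark() by parking itself at a safe point.
 * The name example_park_fn is hypothetical.
 */
static int __maybe_unused example_park_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();
		/* work done here never overlaps a parked period */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}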

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
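
/*
 * Example (illustrative sketch): the typical create/wake/stop lifecycle
 * around kthread_create_on_node().  The names example_start and
 * example_stop_fn are hypothetical.
 */
static int __maybe_unused example_start(void *cookie)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(example_stop_fn, cookie, NUMA_NO_NODE,
				     "example/%d", 0);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	wake_up_process(tsk);	/* the thread is created stopped */
	/* ... later, kthread_stop(tsk) returns example_stop_fn()'s result */
	return 0;
}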

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
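
/*
 * Example (illustrative sketch): binding a just-created (still stopped)
 * kthread to a CPU before the first wakeup.  The name example_bound_start
 * is hypothetical.
 */
static void __maybe_unused example_bound_start(struct task_struct *tsk,
					       unsigned int cpu)
{
	kthread_bind(tsk, cpu);	/* must precede the first wakeup */
	wake_up_process(tsk);
}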

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
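
/*
 * Example (illustrative sketch): controller-side park/unpark around a
 * quiescent section, e.g. for CPU-hotplug-style synchronization.  The
 * name example_quiesce is hypothetical.
 */
static void __maybe_unused example_quiesce(struct task_struct *tsk)
{
	if (!kthread_park(tsk)) {
		/* tsk is parked and scheduled out; do the critical bit */
		kthread_unpark(tsk);
	}
}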

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks or leave preemption or
 * interrupts disabled when they finish. There is a safe point for freezing
 * after one work finishes and before the next one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = -1;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
						node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
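
/*
 * Example (illustrative sketch): creating a worker, queuing one work item
 * and tearing everything down.  The names example_work_fn and
 * example_worker_demo are hypothetical.
 */
static void __maybe_unused example_work_fn(struct kthread_work *work)
{
	/* runs in the worker thread's context */
}

static int __maybe_unused example_worker_demo(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	worker = kthread_create_worker(0, "example_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&work, example_work_fn);
	kthread_queue_work(worker, &work);
	kthread_flush_work(&work);	/* wait for it to run */

	kthread_destroy_worker(worker);
	return 0;
}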

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been created with kthread_create_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is being used the wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	spin_lock(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
				  struct kthread_delayed_work *dwork,
				  unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
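
/*
 * Example (illustrative sketch): queuing a delayed work on an existing
 * worker.  The name example_dwork_demo is hypothetical; example_work_fn
 * is the hypothetical handler sketched above.
 */
static void __maybe_unused example_dwork_demo(struct kthread_worker *worker)
{
	static struct kthread_delayed_work dwork;

	kthread_init_delayed_work(&dwork, example_work_fn);
	/* runs example_work_fn() on @worker roughly 100ms from now */
	kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(100));
}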

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running. The lock must be temporary released
		 * to avoid a deadlock with the callback. In the meantime,
		 * any queuing is blocked by setting the canceling counter.
		 */
		work->canceling++;
		spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
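
/*
 * Example (illustrative sketch): using kthread_mod_delayed_work() as a
 * re-armable watchdog-style timeout.  The name example_touch is
 * hypothetical.
 */
static void __maybe_unused example_touch(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork)
{
	/* pushes a pending timeout back; queues it if it was not pending */
	kthread_mod_delayed_work(worker, dwork, msecs_to_jiffies(500));
}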

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
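
/*
 * Example (illustrative sketch): typical shutdown ordering: cancel a
 * delayed work (and wait for it) before destroying the worker or freeing
 * the containing object.  The name example_teardown is hypothetical.
 */
static void __maybe_unused example_teardown(struct kthread_worker *worker,
					    struct kthread_delayed_work *dwork)
{
	kthread_cancel_delayed_work_sync(dwork);
	kthread_destroy_worker(worker);
}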

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This function
 * stores the original thread's cgroup info in the current kthread context for
 * later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif