// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>
#include <linux/freezer.h>

#include "../kernel/sched/sched.h"
#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};
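
/*
 * IO_WQE_FLAG_STALLED is set when the head of the work list is hashed
 * work whose hash bit is already held by a running worker; workers stop
 * scanning for work until the bit is released and the wqe is woken via
 * the hash waitqueue (see io_wqe_hash_wake()).
 */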

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct completion ref_done;

	struct rcu_head rcu;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};
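
/*
 * Bounded work is expected to complete in bounded time (e.g. regular
 * file I/O), while unbounded work may block indefinitely (e.g. network
 * I/O); each class gets its own worker accounting in io_wqe->acct[].
 */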

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		raw_spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};

/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct task_struct *manager;

	struct io_wq_hash *hash;

	refcount_t refs;
	struct completion exited;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	pid_t task_pid;
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match);

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;

	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}

static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	unsigned flags;

	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
	wait_for_completion(&worker->ref_done);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	flags = worker->flags;
	worker->flags = 0;
	if (flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	preempt_enable();

	raw_spin_lock_irq(&wqe->lock);
	if (flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	acct->nr_workers--;
	raw_spin_unlock_irq(&wqe->lock);

	kfree_rcu(worker, rcu);
	if (atomic_dec_and_test(&wqe->wq->worker_refs))
		complete(&wqe->wq->worker_done);
	do_exit(0);
}

static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;
	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}

static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
		}
		io_wqe_inc_running(worker);
	}
}

/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

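/*
 * Park this wqe on the hash waitqueue until the hash bit we stalled on
 * is cleared. If the bit is already clear by the time we hold the
 * waitqueue lock, back out immediately so we don't sleep through a
 * wakeup that has already happened.
 */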
static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
	struct io_wq *wq = wqe->wq;

	spin_lock(&wq->hash->wait.lock);
	if (list_empty(&wqe->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wqe->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wqe->wait.entry);
		}
	}
	spin_unlock(&wq->hash->wait.lock);
}

static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;

	wq_list_for_each(node, prev, &wqe->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wqe->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		raw_spin_unlock(&wqe->lock);
		io_wait_on_hash(wqe, stall_hash);
		raw_spin_lock(&wqe->lock);
	}

	return NULL;
}

static bool io_flush_signals(void)
{
	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
		__set_current_state(TASK_RUNNING);
		tracehook_notify_signal();
		return true;
	}
	return false;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_flush_signals();
		cond_resched();
	}

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		raw_spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				clear_bit(hash, &wq->hash->map);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
				raw_spin_lock_irq(&wqe->lock);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		raw_spin_lock_irq(&wqe->lock);
	} while (1);
}

static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	io_wqe_inc_running(worker);

	sprintf(buf, "iou-wrk-%d", wq->task_pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			io_worker_handle_work(worker);
			goto loop;
		}
		__io_worker_idle(wqe, worker);
		raw_spin_unlock_irq(&wqe->lock);
		if (io_flush_signals())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (try_to_freeze() || ret)
			continue;
		if (fatal_signal_pending(current))
			break;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			raw_spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * set one up.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock_irq(&worker->wqe->lock);
	io_wqe_dec_running(worker);
	raw_spin_unlock_irq(&worker->wqe->lock);
}
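
/*
 * Both hooks above are called by the scheduler for PF_IO_WORKER tasks
 * (from sched_submit_work() and sched_update_worker() in
 * kernel/sched/core.c), which is how nr_running stays accurate without
 * any polling.
 */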

static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	atomic_inc(&wq->worker_refs);

	tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
	if (IS_ERR(tsk)) {
		if (atomic_dec_and_test(&wq->worker_refs))
			complete(&wq->worker_done);
		kfree(worker);
		return false;
	}

	tsk->pf_io_worker = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, cpumask_of_node(wqe->node));
	tsk->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	raw_spin_unlock_irq(&wqe->lock);
	wake_up_new_task(tsk);
	return true;
}

static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	if (acct->nr_workers && test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state))
		return false;
	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}

static void io_wq_check_workers(struct io_wq *wq)
{
	int node;

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];
		bool fork_worker[2] = { false, false };

		if (!node_online(node))
			continue;

		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
			fork_worker[IO_WQ_ACCT_BOUND] = true;
		if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
			fork_worker[IO_WQ_ACCT_UNBOUND] = true;
		raw_spin_unlock_irq(&wqe->lock);
		if (fork_worker[IO_WQ_ACCT_BOUND])
			create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
		if (fork_worker[IO_WQ_ACCT_UNBOUND])
			create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
	}
}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static void io_wq_cancel_pending(struct io_wq *wq)
{
	struct io_cb_cancel_data match = {
		.fn		= io_wq_work_match_all,
		.cancel_all	= true,
	};
	int node;

	for_each_node(node)
		io_wqe_cancel_pending_work(wq->wqes[node], &match);
}

/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	char buf[TASK_COMM_LEN];
	int node;

	sprintf(buf, "iou-mgr-%d", wq->task_pid);
	set_task_comm(current, buf);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		io_wq_check_workers(wq);
		schedule_timeout(HZ);
		try_to_freeze();
		if (fatal_signal_pending(current))
			set_bit(IO_WQ_BIT_EXIT, &wq->state);
	} while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_check_workers(wq);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
	wait_for_completion(&wq->worker_done);

	spin_lock_irq(&wq->hash->wait.lock);
	for_each_node(node)
		list_del_init(&wq->wqes[node]->wait.entry);
	spin_unlock_irq(&wq->hash->wait.lock);

	io_wq_cancel_pending(wq);
	complete(&wq->exited);
	do_exit(0);
}

static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}
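
/*
 * Illustrative queue layout (not from the original source): with two
 * hash chains A and B and one unhashed item, the single list might be
 *
 *	A1 -> A2 -> B1 -> unhashed
 *
 * with hash_tail[A] == A2 and hash_tail[B] == B1. Keeping same-hash
 * items contiguous lets io_get_next_work() skip a stalled chain in one
 * step by jumping to its tail.
 */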

static int io_wq_fork_manager(struct io_wq *wq)
{
	struct task_struct *tsk;

	if (wq->manager)
		return 0;

	WARN_ON_ONCE(test_bit(IO_WQ_BIT_EXIT, &wq->state));

	init_completion(&wq->worker_done);
	atomic_set(&wq->worker_refs, 1);
	tsk = create_io_thread(io_wq_manager, wq, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		wq->manager = get_task_struct(tsk);
		wake_up_new_task(tsk);
		return 0;
	}

	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);

	return PTR_ERR(tsk);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/* Can only happen if manager creation fails after exec */
	if (io_wq_fork_manager(wqe->wq) ||
	    test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	raw_spin_lock_irqsave(&wqe->lock, flags);
	io_wqe_insert_work(wqe, work);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	raw_spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
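
/*
 * Example (an illustrative sketch; actual callers live outside this
 * file): serializing buffered writes to the same file by hashing on
 * the inode, where req is assumed to embed an io_wq_work:
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 */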

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    match->fn(worker->cur_work, match->data)) {
		set_notify_signal(worker->task);
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&wqe->work_list, &work->list, prev);
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	raw_spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	raw_spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
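
/*
 * Illustrative matcher sketch (names are assumptions, not part of this
 * file): a caller that embeds io_wq_work in its own request type can
 * match on request state:
 *
 *	static bool match_owner(struct io_wq_work *work, void *data)
 *	{
 *		struct my_req *req = container_of(work, struct my_req, work);
 *
 *		return req->owner == data;
 *	}
 *
 * and cancel everything for an owner with
 * io_wq_cancel_cb(wq, match_owner, owner, true).
 */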

static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			    int sync, void *key)
{
	struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
	int ret;

	list_del_init(&wait->entry);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret)
		wake_up_process(wqe->wq->manager);

	return 1;
}

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes)
		goto err_wq;

	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wqes;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

1005 1006 1007
		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wait.func = io_wqe_hash_wake;
		INIT_LIST_HEAD(&wqe->wait.entry);
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task_pid = current->pid;
	init_completion(&wq->exited);
	refcount_set(&wq->refs, 1);

	ret = io_wq_fork_manager(wq);
	if (!ret)
		return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node)
		kfree(wq->wqes[node]);
err_wqes:
	kfree(wq->wqes);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
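
/*
 * Illustrative setup sketch (io_uring's real caller is
 * io_init_wq_offload() in fs/io_uring.c; the names below are
 * assumptions for the example):
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,
 *		.free_work	= my_free_work,
 *		.do_work	= my_do_work,
 *	};
 *	struct io_wq *wq = io_wq_create(concurrency, &data);
 *
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 */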

static void io_wq_destroy_manager(struct io_wq *wq)
{
	if (wq->manager) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->exited);
		put_task_struct(wq->manager);
		wq->manager = NULL;
	}
}

static void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	io_wq_destroy_manager(wq);

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
		kfree(wqe);
	}
	io_wq_put_hash(wq->hash);
	kfree(wq->wqes);
	kfree(wq);
}

void io_wq_put(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->refs))
		io_wq_destroy(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	io_wq_destroy_manager(wq);
	io_wq_put(wq);
}

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct task_struct *task = worker->task;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(task, &rf);
	do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
	task->flags |= PF_NO_SETAFFINITY;
	task_rq_unlock(rq, task, &rf);
	return false;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
	rcu_read_unlock();
	return 0;
}

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, NULL);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}
subsys_initcall(io_wq_init);