// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include <linux/fs_struct.h>

#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_EXITING	= 8,	/* worker exiting */
	IO_WORKER_F_FIXED	= 16,	/* static idle worker */
	IO_WORKER_F_BOUND	= 32,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_CANCEL	= 1,	/* cancel work on list */
	IO_WQ_BIT_ERROR		= 2,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct rcu_head rcu;
	struct mm_struct *mm;
	const struct cred *cur_creds;
	const struct cred *saved_creds;
	struct files_struct *restore_files;
	struct fs_struct *restore_fs;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
};

/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	refcount_t use_refs;
};

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}

/*
 * Note: drops the wqe->lock if returning true! The caller must re-acquire
 * the lock in that case. Some callers need to restart handling if this
 * happens, so we can't just re-acquire the lock on behalf of the caller.
 */
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
	bool dropped_lock = false;

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	if (current->files != worker->restore_files) {
		__acquire(&wqe->lock);
		spin_unlock_irq(&wqe->lock);
		dropped_lock = true;

		task_lock(current);
		current->files = worker->restore_files;
		task_unlock(current);
	}

	if (current->fs != worker->restore_fs)
		current->fs = worker->restore_fs;

	/*
	 * If we have an active mm, we need to drop the wq lock before unusing
	 * it. If we do, return true and let the caller retry the idle loop.
	 */
	if (worker->mm) {
		if (!dropped_lock) {
			__acquire(&wqe->lock);
			spin_unlock_irq(&wqe->lock);
			dropped_lock = true;
		}
		__set_current_state(TASK_RUNNING);
		set_fs(KERNEL_DS);
		unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

	return dropped_lock;
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
						  struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}

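/*
 * Final cleanup for an exiting worker: wait out any transient reference,
 * undo the running/process accounting, unlink from the free and all lists,
 * and put the wq reference once the last worker on this node is gone.
 */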
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
	unsigned nr_workers;

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	if (worker->flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(worker->flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);
	worker->flags = 0;
	preempt_enable();

	spin_lock_irq(&wqe->lock);
	hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	if (__io_worker_unuse(wqe, worker)) {
		__release(&wqe->lock);
		spin_lock_irq(&wqe->lock);
	}
	acct->nr_workers--;
	nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
	spin_unlock_irq(&wqe->lock);

	/* all workers gone, wq exit can proceed */
	if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);

	kfree_rcu(worker, rcu);
}

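/*
 * Returns true if this wqe has work a worker could run right now, i.e.
 * the list is non-empty and we are not stalled on hashed work.
 */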
static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;
	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}

static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}

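/*
 * Worker thread startup: allow SIGINT for cancellation, mark the task as
 * an io_wq worker, remember the files/fs context to restore on exit, and
 * account the worker as running.
 */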
static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
	allow_kernel_signal(SIGINT);

	current->flags |= PF_IO_WORKER;

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	worker->restore_files = current->files;
	worker->restore_fs = current->fs;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(wqe, worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(wqe, worker);
	}
}

/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}

	return __io_worker_unuse(wqe, worker);
}

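/*
 * Pick the next runnable work item. Unhashed work can always run; hashed
 * work only runs if no other item with the same hash is in progress, in
 * which case we claim the hash bit before returning it.
 */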
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!(work->flags & IO_WQ_WORK_HASHED)) {
			wq_node_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		*hash = work->flags >> IO_WQ_HASH_SHIFT;
		if (!(wqe->hash_map & BIT(*hash))) {
			wqe->hash_map |= BIT(*hash);
			wq_node_del(&wqe->work_list, node, prev);
			return work;
		}
	}

	return NULL;
}

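/*
 * Switch to the mm of the new work item: drop whatever mm the worker
 * currently holds, then either run without a user mm (KERNEL_DS) or adopt
 * work->mm. If the mm can no longer be grabbed, flag the work for cancel.
 */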
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
	if (worker->mm) {
		unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}
	if (!work->mm) {
		set_fs(KERNEL_DS);
		return;
	}
	if (mmget_not_zero(work->mm)) {
		use_mm(work->mm);
		if (!worker->mm)
			set_fs(USER_DS);
		worker->mm = work->mm;
		/* hang on to this mm */
		work->mm = NULL;
		return;
	}

	/* failed grabbing mm, ensure work gets cancelled */
	work->flags |= IO_WQ_WORK_CANCEL;
}

static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->creds);

	worker->cur_creds = work->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}

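/* Assume the files, fs, mm and creds that the work item was queued with */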
static void io_impersonate_work(struct io_worker *worker,
				struct io_wq_work *work)
{
	if (work->files && current->files != work->files) {
		task_lock(current);
		current->files = work->files;
		task_unlock(current);
	}
	if (work->fs && current->fs != work->fs)
		current->fs = work->fs;
	if (work->mm != worker->mm)
		io_wq_switch_mm(worker, work);
	if (worker->cur_creds != work->creds)
		io_wq_switch_creds(worker, work);
}

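/*
 * Publish the work item this worker is about to run (or NULL when done)
 * under worker->lock, so cancellation can see and signal it.
 */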
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		/* flush pending signals before assigning new work */
		if (signal_pending(current))
			flush_signals(current);
		cond_resched();
	}

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

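/*
 * Core work loop: pull work off the wqe list and run it, following any
 * dependent (linked) work, until no runnable work remains.
 */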
static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	unsigned hash = -1U;

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe, &hash);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *old_work;

			io_impersonate_work(worker, work);
			/*
			 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
			 * work, the worker function will do the right thing.
			 */
			if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
				work->flags |= IO_WQ_WORK_CANCEL;

			old_work = work;
			work->func(&work);
			work = (old_work == work) ? NULL : work;
			io_assign_current_work(worker, work);
			wq->free_work(old_work);

			if (hash != -1U) {
				spin_lock_irq(&wqe->lock);
				wqe->hash_map &= ~BIT_ULL(hash);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* dependent work is not hashed */
				hash = -1U;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		spin_lock_irq(&wqe->lock);
	} while (1);
}

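/*
 * Worker thread entry point: handle work while it is available, otherwise
 * idle on the free list until the idle timeout expires or the wq exits.
 */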
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(wqe, worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		/* drops the lock on success, retry */
		if (__io_worker_idle(wqe, worker)) {
			__release(&wqe->lock);
			goto loop;
		}
		spin_unlock_irq(&wqe->lock);
		if (signal_pending(current))
			flush_signals(current);
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * set one up.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	spin_lock_irq(&wqe->lock);
	io_wqe_dec_running(wqe, worker);
	spin_unlock_irq(&wqe->lock);
}

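/*
 * Create a new worker thread for this wqe and accounting class, add it to
 * the free and all lists, and wake it up. Returns false on failure.
 */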
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
				"io_wqe_worker-%d/%d", index, wqe->node);
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return false;
	}

	spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	wake_up_process(worker->task);
	return true;
}

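/*
 * Does this accounting class need another worker? Only if there is runnable
 * work, no free worker to take it, and we are still below max_workers.
 */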
static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}

/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	int workers_to_create = num_possible_nodes();
	int node;

	/* create fixed workers */
	refcount_set(&wq->refs, workers_to_create);
	for_each_node(node) {
		if (!node_online(node))
			continue;
		if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
			goto err;
		workers_to_create--;
	}

	while (workers_to_create--)
		refcount_dec(&wq->refs);

	complete(&wq->done);

	while (!kthread_should_stop()) {
		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	return 0;
err:
	set_bit(IO_WQ_BIT_ERROR, &wq->state);
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (refcount_sub_and_test(workers_to_create, &wq->refs))
		complete(&wq->done);
	return 0;
}

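/*
 * Bound work can always be queued. For unbound work, allow it if an unbound
 * worker is already running or a free worker is available; otherwise refuse
 * once the per-user worker limit is reached, unless the caller has
 * CAP_SYS_RESOURCE or CAP_SYS_ADMIN.
 */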
static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
			    struct io_wq_work *work)
{
	bool free_worker;

	if (!(work->flags & IO_WQ_WORK_UNBOUND))
		return true;
	if (atomic_read(&acct->nr_running))
		return true;

	rcu_read_lock();
	free_worker = !hlist_nulls_empty(&wqe->free_list);
	rcu_read_unlock();
	if (free_worker)
		return true;

	if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
	    !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
		return false;

	return true;
}

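/*
 * Run a work item (and any dependent link) with IO_WQ_WORK_CANCEL set,
 * freeing each item once its handler has run.
 */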
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *old_work = work;

		work->flags |= IO_WQ_WORK_CANCEL;
		work->func(&work);
		work = (work == old_work) ? NULL : work;
		wq->free_work(old_work);
	} while (work);
}

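/*
 * Queue work on this wqe. If unbound work cannot be accepted, cancel it
 * right away; otherwise add it to the list and wake (or create) a worker.
 */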
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/*
	 * Do early check to see if we need a new unbound worker, and if we do,
	 * if we're allowed to do so. This isn't 100% accurate as there's a
	 * gap between this check and incrementing the value, but that's OK.
	 * It's close enough to not be an issue, fork() has the same delay.
	 */
	if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_add_tail(&work->list, &wqe->work_list);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Enqueue work, hashed by some key. Work items that hash to the same value
 * will not be done in parallel. Used to limit concurrent writes, generally
 * hashed by inode.
 */
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];
	unsigned bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
	io_wqe_enqueue(wqe, work);
}

static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
{
	send_sig(SIGINT, worker->task, 1);
	return false;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

void io_wq_cancel_all(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_CANCEL, &wq->state);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
	}
	rcu_read_unlock();
}

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
};

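/*
 * Per-worker cancel callback: if this worker is currently running a
 * matching, cancellable work item, signal it with SIGINT.
 */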
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;
	bool ret = false;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
	    match->fn(worker->cur_work, match->data)) {
		send_sig(SIGINT, worker->task, 1);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return ret;
}

static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
					    struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;
	bool found = false;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		if (match->fn(work, match->data)) {
			wq_node_del(&wqe->work_list, node, prev);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&wqe->lock, flags);

	if (found) {
		io_run_cancel(work, wqe);
		return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	rcu_read_lock();
	found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
	return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
}

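/*
 * Cancel work matching @cancel across all nodes. Returns IO_WQ_CANCEL_OK if
 * the work was still queued, IO_WQ_CANCEL_RUNNING if a running worker was
 * signalled, or IO_WQ_CANCEL_NOTFOUND otherwise.
 */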
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data)
{
	struct io_cb_cancel_data match = {
		.fn	= cancel,
		.data	= data,
	};
	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
	int node;

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		ret = io_wqe_cancel_work(wqe, &match);
		if (ret != IO_WQ_CANCEL_NOTFOUND)
			break;
	}

	return ret;
}

static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
{
	return work == data;
}

enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork);
}

static bool io_wq_pid_match(struct io_wq_work *work, void *data)
{
	pid_t pid = (pid_t) (unsigned long) data;

	return work->task_pid == pid;
}

enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
{
	void *data = (void *) (unsigned long) pid;

	return io_wq_cancel_cb(wq, io_wq_pid_match, data);
}

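/*
 * Set up an io_wq: allocate one io_wqe per NUMA node, then start the
 * manager thread, which creates the initial fixed bound workers.
 */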
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes) {
		kfree(wq);
		return ERR_PTR(-ENOMEM);
	}

	wq->free_work = data->free_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	init_completion(&wq->done);

	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
	if (!IS_ERR(wq->manager)) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->done);
		if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
			ret = -ENOMEM;
			goto err;
		}
		refcount_set(&wq->use_refs, 1);
		reinit_completion(&wq->done);
		return wq;
	}

	ret = PTR_ERR(wq->manager);
	complete(&wq->done);
err:
	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
	return ERR_PTR(ret);
}

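/*
 * Grab an additional reference to an existing io_wq, provided it was
 * created with the same free_work callback.
 */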
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
	if (data->free_work != wq->free_work)
		return false;

	return refcount_inc_not_zero(&wq->use_refs);
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}

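/*
 * Tear down the wq: stop the manager, wake all workers so they notice
 * IO_WQ_BIT_EXIT, wait for them to finish, then free per-node state.
 */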
static void __io_wq_destroy(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (wq->manager)
		kthread_stop(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}

void io_wq_destroy(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->use_refs))
		__io_wq_destroy(wq);
}