// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include <linux/fs_struct.h>

#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_EXITING	= 8,	/* worker exiting */
	IO_WORKER_F_FIXED	= 16,	/* static idle worker */
	IO_WORKER_F_BOUND	= 32,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_CANCEL	= 1,	/* cancel work on list */
	IO_WQ_BIT_ERROR		= 2,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	struct rcu_head rcu;
	struct mm_struct *mm;
	const struct cred *cur_creds;
	const struct cred *saved_creds;
	struct files_struct *restore_files;
	struct fs_struct *restore_fs;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
};

/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	refcount_t use_refs;
};

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

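/*
 * Drop a worker reference. An exiting worker parks in io_worker_exit()
 * until its refcount hits zero, so the final put must wake it back up.
 */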
static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}

/*
 * Note: drops the wqe->lock if returning true! The caller must re-acquire
 * the lock in that case. Some callers need to restart handling if this
 * happens, so we can't just re-acquire the lock on behalf of the caller.
 */
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
	bool dropped_lock = false;

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	if (current->files != worker->restore_files) {
		__acquire(&wqe->lock);
		spin_unlock_irq(&wqe->lock);
		dropped_lock = true;

		task_lock(current);
		current->files = worker->restore_files;
		task_unlock(current);
	}

	if (current->fs != worker->restore_fs)
		current->fs = worker->restore_fs;

	/*
	 * If we have an active mm, we need to drop the wq lock before unusing
	 * it. If we do, return true and let the caller retry the idle loop.
	 */
	if (worker->mm) {
		if (!dropped_lock) {
			__acquire(&wqe->lock);
			spin_unlock_irq(&wqe->lock);
			dropped_lock = true;
		}
		__set_current_state(TASK_RUNNING);
		set_fs(KERNEL_DS);
		unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}

	return dropped_lock;
}

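/*
 * Workers and work items are accounted as either bounded or unbounded;
 * these helpers pick the matching acct slot for a work item or worker.
 */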
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
						  struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}

static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
	unsigned nr_workers;

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	if (worker->flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(worker->flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);
	worker->flags = 0;
	preempt_enable();

	spin_lock_irq(&wqe->lock);
	hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	if (__io_worker_unuse(wqe, worker)) {
		__release(&wqe->lock);
		spin_lock_irq(&wqe->lock);
	}
	acct->nr_workers--;
	nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
	spin_unlock_irq(&wqe->lock);

	/* all workers gone, wq exit can proceed */
	if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);

	kfree_rcu(worker, rcu);
}

static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;
	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}

static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}

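/*
 * Runs in the context of the newly created worker thread: mark it as an
 * io_wq worker, record the files/fs state to restore when going idle, and
 * account it as running.
 */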
static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
	allow_kernel_signal(SIGINT);

	current->flags |= PF_IO_WORKER;

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	worker->restore_files = current->files;
	worker->restore_fs = current->fs;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(wqe, worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(wqe, worker);
	}
}

/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}

	return __io_worker_unuse(wqe, worker);
}

static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!(work->flags & IO_WQ_WORK_HASHED)) {
			wq_node_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		*hash = work->flags >> IO_WQ_HASH_SHIFT;
		if (!(wqe->hash_map & BIT(*hash))) {
			wqe->hash_map |= BIT(*hash);
			wq_node_del(&wqe->work_list, node, prev);
			return work;
		}
	}

	return NULL;
}

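/*
 * Switch the worker to the mm the work item was queued with. If the work
 * carries no mm we fall back to KERNEL_DS; if the mm can no longer be
 * pinned, the work is flagged for cancellation instead.
 */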
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
	if (worker->mm) {
		unuse_mm(worker->mm);
		mmput(worker->mm);
		worker->mm = NULL;
	}
	if (!work->mm) {
		set_fs(KERNEL_DS);
		return;
	}
	if (mmget_not_zero(work->mm)) {
		use_mm(work->mm);
		if (!worker->mm)
			set_fs(USER_DS);
		worker->mm = work->mm;
		/* hang on to this mm */
		work->mm = NULL;
		return;
	}

	/* failed grabbing mm, ensure work gets cancelled */
	work->flags |= IO_WQ_WORK_CANCEL;
}

static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->creds);

	worker->cur_creds = work->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}

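/*
 * Assume the execution context (files, fs, mm, creds) that the work item
 * was queued with, switching only the pieces that differ from the current
 * worker state.
 */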
static void io_impersonate_work(struct io_worker *worker,
				struct io_wq_work *work)
{
	if (work->files && current->files != work->files) {
		task_lock(current);
		current->files = work->files;
		task_unlock(current);
	}
	if (work->fs && current->fs != work->fs)
		current->fs = work->fs;
	if (work->mm != worker->mm)
		io_wq_switch_mm(worker, work);
	if (worker->cur_creds != work->creds)
		io_wq_switch_creds(worker, work);
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	/* flush pending signals before assigning new work */
	if (signal_pending(current))
		flush_signals(current);
	cond_resched();

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

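/*
 * Main work processing loop: pull the next runnable item off the wqe list
 * (respecting the hash exclusion used to serialise hashed work), run it and
 * any dependent link it spawns, then clear the hash bit and stalled state
 * once a hashed chain completes.
 */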
static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;
	unsigned hash = -1U;

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe, &hash);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *old_work;

			io_impersonate_work(worker, work);
			/*
			 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
			 * work, the worker function will do the right thing.
			 */
			if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
				work->flags |= IO_WQ_WORK_CANCEL;

			old_work = work;
			work->func(&work);
			work = (old_work == work) ? NULL : work;
			io_assign_current_work(worker, work);
			wq->free_work(old_work);

			if (hash != -1U) {
				spin_lock_irq(&wqe->lock);
				wqe->hash_map &= ~BIT_ULL(hash);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* dependent work is not hashed */
				hash = -1U;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		spin_lock_irq(&wqe->lock);
	} while (1);
}

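/*
 * Worker thread entry point: handle work while any is queued, otherwise
 * sit on the free list and exit after WORKER_IDLE_TIMEOUT unless this is
 * the node's fixed worker.
 */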
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(wqe, worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		/* drops the lock on success, retry */
		if (__io_worker_idle(wqe, worker)) {
			__release(&wqe->lock);
			goto loop;
		}
		spin_unlock_irq(&wqe->lock);
		if (signal_pending(current))
			flush_signals(current);
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(wqe, worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * set one up.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = kthread_data(tsk);
	struct io_wqe *wqe = worker->wqe;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	spin_lock_irq(&wqe->lock);
	io_wqe_dec_running(wqe, worker);
	spin_unlock_irq(&wqe->lock);
}

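/*
 * Create a worker kthread for the given accounting class (bound/unbound)
 * and park it on the free list. The first bound worker on a node becomes
 * the fixed worker that never times out.
 */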
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_worker *worker;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
				"io_wqe_worker-%d/%d", index, wqe->node);
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return false;
	}

	spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	wake_up_process(worker->task);
	return true;
}

static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}

/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	int workers_to_create = num_possible_nodes();
	int node;

	/* create fixed workers */
	refcount_set(&wq->refs, workers_to_create);
	for_each_node(node) {
		if (!node_online(node))
			continue;
		if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
			goto err;
		workers_to_create--;
	}

	while (workers_to_create--)
		refcount_dec(&wq->refs);

	complete(&wq->done);

	while (!kthread_should_stop()) {
		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	return 0;
err:
	set_bit(IO_WQ_BIT_ERROR, &wq->state);
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (refcount_sub_and_test(workers_to_create, &wq->refs))
		complete(&wq->done);
	return 0;
}

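/*
 * Gate queueing of unbound work: it's allowed if an unbound worker is
 * already running or free, or if the user is still below the
 * RLIMIT_NPROC-derived worker cap (CAP_SYS_RESOURCE/CAP_SYS_ADMIN bypass
 * the cap).
 */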
static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
			    struct io_wq_work *work)
{
	bool free_worker;

	if (!(work->flags & IO_WQ_WORK_UNBOUND))
		return true;
	if (atomic_read(&acct->nr_running))
		return true;

	rcu_read_lock();
	free_worker = !hlist_nulls_empty(&wqe->free_list);
	rcu_read_unlock();
	if (free_worker)
		return true;

	if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
	    !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
		return false;

	return true;
}

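/*
 * Run a work item and its whole dependent link in cancelled mode instead
 * of queueing it, freeing each item as it completes.
 */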
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *old_work = work;

		work->flags |= IO_WQ_WORK_CANCEL;
		work->func(&work);
		work = (work == old_work) ? NULL : work;
		wq->free_work(old_work);
	} while (work);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/*
	 * Do early check to see if we need a new unbound worker, and if we do,
	 * if we're allowed to do so. This isn't 100% accurate as there's a
	 * gap between this check and incrementing the value, but that's OK.
	 * It's close enough to not be an issue, fork() has the same delay.
	 */
	if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_add_tail(&work->list, &wqe->work_list);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Enqueue work, hashed by some key. Work items that hash to the same value
 * will not be done in parallel. Used to limit concurrent writes, generally
 * hashed by inode.
 */
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];
	unsigned bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
	io_wqe_enqueue(wqe, work);
}

static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
{
	send_sig(SIGINT, worker->task, 1);
	return false;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}
	return ret;
}

void io_wq_cancel_all(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_CANCEL, &wq->state);

	rcu_read_lock();
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
	}
	rcu_read_unlock();
}

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
};

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;
	bool ret = false;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
	    match->fn(worker->cur_work, match->data)) {
		send_sig(SIGINT, worker->task, 1);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return ret;
}

static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
					    struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;
	bool found = false;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		if (match->fn(work, match->data)) {
			wq_node_del(&wqe->work_list, node, prev);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&wqe->lock, flags);

	if (found) {
		io_run_cancel(work, wqe);
		return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	rcu_read_lock();
	found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
	return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
}

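/*
 * Cancel work matching @cancel: try to remove it from a pending list first
 * (IO_WQ_CANCEL_OK), otherwise signal the worker currently running it
 * (IO_WQ_CANCEL_RUNNING).
 */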
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data)
{
	struct io_cb_cancel_data match = {
		.fn	= cancel,
		.data	= data,
	};
	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
	int node;

	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		ret = io_wqe_cancel_work(wqe, &match);
		if (ret != IO_WQ_CANCEL_NOTFOUND)
			break;
	}

	return ret;
}

static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
{
	return work == data;
}

enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork);
}

static bool io_wq_pid_match(struct io_wq_work *work, void *data)
{
	pid_t pid = (pid_t) (unsigned long) data;

	return work->task_pid == pid;
}

enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
{
	void *data = (void *) (unsigned long) pid;

	return io_wq_cancel_cb(wq, io_wq_pid_match, data);
}

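/*
 * Create an io_wq with one io_wqe per NUMA node, start the manager thread,
 * and wait for it to bring up the fixed workers before returning.
 */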
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes) {
		kfree(wq);
		return ERR_PTR(-ENOMEM);
	}

	wq->free_work = data->free_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	init_completion(&wq->done);

	wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
	if (!IS_ERR(wq->manager)) {
		wake_up_process(wq->manager);
		wait_for_completion(&wq->done);
		if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
			ret = -ENOMEM;
			goto err;
		}
		refcount_set(&wq->use_refs, 1);
		reinit_completion(&wq->done);
		return wq;
	}

	ret = PTR_ERR(wq->manager);
	complete(&wq->done);
err:
	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
	return ERR_PTR(ret);
}

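/*
 * Grab an extra reference on an existing io_wq, but only if it was created
 * with the same free_work callback as @data.
 */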
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
	if (data->free_work != wq->free_work)
		return false;

	return refcount_inc_not_zero(&wq->use_refs);
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}

static void __io_wq_destroy(struct io_wq *wq)
{
	int node;

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (wq->manager)
		kthread_stop(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}

void io_wq_destroy(struct io_wq *wq)
{
	if (refcount_dec_and_test(&wq->use_refs))
		__io_wq_destroy(wq);
}