/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3
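
/*
 * Lifecycle of the work->flags bits used below: WORK_QUEUED_BIT is set
 * when an item is put on a pending list and cleared just before
 * work->func runs, so a finished item can be requeued.  For ordered
 * queues, WORK_DONE_BIT marks work->func as finished and
 * WORK_ORDER_DONE_BIT is claimed by whichever thread gets to call
 * work->ordered_func, so it runs exactly once.  WORK_HIGH_PRIO_BIT
 * routes an item to the prio_pending lists.
 */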

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

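	/*
	 * bumped by next_worker() each time this thread is handed new
	 * work while busy; the busy list is rotated in batches of
	 * idle_thresh submissions based on this count
	 */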
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};

static void start_new_worker_func(struct btrfs_work *work)
{
	struct worker_start *start;
	start = container_of(work, struct worker_start, work);
	btrfs_start_workers(start->queue, 1);
	kfree(start);
}

static int start_new_worker(struct btrfs_workers *queue)
{
	struct worker_start *start;
	int ret;

	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return -ENOMEM;

	start->work.func = start_new_worker_func;
	start->queue = queue;
	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
	if (ret)
		kfree(start);
	return ret;
}
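
/*
 * Illustrative wiring of the helper queue (a sketch only; 'helper' and
 * 'pool' are hypothetical names): the helper is an ordinary btrfs_workers
 * pool created with a single thread and no helper of its own, so it can
 * always make progress:
 *
 *	struct btrfs_workers helper, pool;
 *
 *	btrfs_init_workers(&helper, "helper", 1, NULL);
 *	btrfs_start_workers(&helper, 1);
 *	btrfs_init_workers(&pool, "worker", 8, &helper);
 */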

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				 &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				      &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
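
/*
 * together, check_idle_worker and check_busy_worker give the idle
 * tracking some hysteresis: with the default idle_thresh of 32 (set in
 * btrfs_init_workers), a thread moves to the idle list once its pending
 * count drops below 16 and back to the busy list once it reaches 32,
 * instead of bouncing between lists at a single threshold.
 */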

static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	start_new_worker(workers);
	return;

out:
	spin_unlock_irqrestore(&workers->lock, flags);
}

static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
	return 0;
}
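
/*
 * for example: if items A, B and C were queued in that order and B's
 * func finishes first, run_ordered_completions only sets B's
 * WORK_DONE_BIT and stops at A; B's ordered_func runs later, after the
 * worker that finishes A walks the order_list past A and reaches B.
 */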

static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}
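
/*
 * worker->refs keeps the btrfs_worker_thread struct alive through the
 * shutdown window: the thread drops its own reference above only after
 * unlinking itself from the pool, and btrfs_stop_workers takes an extra
 * reference before calling kthread_stop so the struct can't vanish
 * underneath it.
 */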

static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head))
		cur = prio_head->next;

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}
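
/*
 * get_next_work splices both pending lists into the caller's local lists
 * under worker->lock, then hands items out without retaking the lock
 * until the local lists run dry.  prio_pending is rechecked on every
 * call so new high priority work can jump ahead of already-spliced
 * regular items.
 */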

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);

		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
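
/*
 * worker->working is the wakeup handshake between worker_loop and the
 * queueing paths: queueing threads call wake_up_process() only when they
 * flip working from 0 to 1, and before sleeping for real worker_loop sets
 * its task state and rechecks both pending lists under worker->lock prior
 * to clearing working, so a wakeup can't be lost between the final check
 * and the sleep.
 */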

/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_helper)
{
	workers->num_workers = 0;
	workers->num_workers_starting = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = async_helper;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers,
				 int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->prio_pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);

		atomic_set(&worker->num_pending, 0);
		atomic_set(&worker->refs, 1);
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}
		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		workers->num_workers_starting--;
		WARN_ON(workers->num_workers_starting < 0);
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting += num_workers;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers, num_workers);
}
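
/*
 * num_workers_starting is bumped before __btrfs_start_workers runs so
 * that next_worker() and find_worker() also count threads whose
 * kthread_run() hasn't finished yet and don't push the pool past
 * max_workers in the meantime.
 */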

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			__btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
		  struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			      &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:

	return 0;
}

void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}
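
/*
 * End-to-end usage, as an illustrative sketch only ('my_item', 'my_func',
 * 'do_the_deferred_work' and 'pool' are hypothetical names, not part of
 * this file): callers embed a struct btrfs_work in their own struct,
 * point work->func at a handler, and queue it; the handler recovers the
 * containing struct with container_of().
 *
 *	struct my_item {
 *		struct btrfs_work work;
 *		int payload;
 *	};
 *
 *	static void my_func(struct btrfs_work *work)
 *	{
 *		struct my_item *item;
 *
 *		item = container_of(work, struct my_item, work);
 *		do_the_deferred_work(item->payload);
 *		kfree(item);
 *	}
 *
 *	item = kzalloc(sizeof(*item), GFP_NOFS);
 *	item->work.func = my_func;
 *	btrfs_queue_worker(&pool, &item->work);
 */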