/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3
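
/*
 * a work item's flags track its lifecycle: WORK_QUEUED_BIT is set when
 * the item goes onto a pending list and cleared just before its func()
 * runs; WORK_DONE_BIT is set once func() has finished; and
 * WORK_ORDER_DONE_BIT records that ordered_func() has been called, so
 * the ordered list walk never calls it twice.  WORK_HIGH_PRIO_BIT
 * routes the item onto the prio_pending and prio_order lists.
 */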
/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;
	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;
		list_move(&worker->worker_list, &worker->workers->idle_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
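
/*
 * together the two helpers above give the idle accounting some
 * hysteresis: with the default idle_thresh of 32, a worker leaves the
 * idle list once it has 32 or more items pending, but is not marked
 * idle again until it drains below 16, so it does not bounce between
 * the two lists on every queue/complete pair.
 */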

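/*
 * if a pool that defers thread creation (atomic_worker_start) flagged
 * a deferred start in find_worker(), do the actual btrfs_start_workers
 * call here, from worker thread context, instead of in the original
 * queuing context.
 */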
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers >= workers->max_workers)
		goto out;

	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_start_workers(workers, 1);
	return;

out:
	spin_unlock_irqrestore(&workers->lock, flags);
}

static noinline int run_ordered_completions(struct btrfs_workers *workers,
					    struct btrfs_work *work)
{
	unsigned long flags;

	if (!workers->ordered)
		return 0;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock_irqsave(&workers->lock, flags);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock_irqrestore(&workers->lock, flags);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock_irqsave(&workers->lock, flags);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock_irqrestore(&workers->lock, flags);
	return 0;
}
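
/*
 * an illustration of the barrier above: if ordered items A, B and C
 * are queued in that order and B's func() happens to finish first, B
 * only gets WORK_DONE_BIT set.  The list walk stops at A, which is not
 * done yet, so B's ordered_func() is deferred until A completes and a
 * later pass walks past A to B.  ordered_func() therefore always runs
 * in queue order, whatever order the work functions complete in.
 */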

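/*
 * drop a reference on a worker, freeing it once the list reference and
 * any in-flight users are gone
 */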
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

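/*
 * called by an idle worker after its long sleep times out: tear the
 * worker down if the pool can spare a thread and nothing is pending
 */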
static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	/*
	 * interrupts are already off from the outer lock; taking the
	 * inner lock with spin_lock_irq would let the matching unlock
	 * below re-enable them while worker->lock is still held
	 */
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending)) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head *cur;
	struct btrfs_work *work;
	do {
		spin_lock_irq(&worker->lock);
again_locked:
		while (1) {
			if (!list_empty(&worker->prio_pending))
				cur = worker->prio_pending.next;
			else if (!list_empty(&worker->pending))
				cur = worker->pending.next;
			else
				break;

			work = list_entry(cur, struct btrfs_work, list);
			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;
			spin_unlock_irq(&worker->lock);

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);

			spin_lock_irq(&worker->lock);
			check_idle_worker(worker);
		}
		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					goto again_locked;

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
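
/*
 * the sleep path above is deliberately staged: first a cpu_relax() and
 * recheck, then a short schedule_timeout(1) yield with worker->working
 * still set so queuers can skip wake_up_process(), and only then a real
 * TASK_INTERRUPTIBLE sleep with working cleared.  The HZ * 120 timeout
 * lets a worker that stays idle for two minutes offer itself to
 * try_worker_shutdown().
 */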

/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
	return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
	workers->num_workers = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = 0;
}
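
/*
 * a minimal usage sketch (hypothetical caller; the real callers live
 * in the btrfs mount and setup code):
 *
 *	struct btrfs_workers workers;
 *
 *	btrfs_init_workers(&workers, "demo", 4);
 *	btrfs_start_workers(&workers, 1);
 *	... queue items with btrfs_queue_worker() ...
 *	btrfs_stop_workers(&workers);
 */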

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;
	int i;

	for (i = 0; i < num_workers; i++) {
		worker = kzalloc(sizeof(*worker), GFP_NOFS);
		if (!worker) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&worker->pending);
		INIT_LIST_HEAD(&worker->prio_pending);
		INIT_LIST_HEAD(&worker->worker_list);
		spin_lock_init(&worker->lock);
		atomic_set(&worker->num_pending, 0);
		atomic_set(&worker->refs, 1);
		worker->workers = workers;
		worker->task = kthread_run(worker_loop, worker,
					   "btrfs-%s-%d", workers->name,
					   workers->num_workers + i);
		if (IS_ERR(worker->task)) {
			ret = PTR_ERR(worker->task);
			kfree(worker);
			goto fail;
		}
		spin_lock_irq(&workers->lock);
		list_add_tail(&worker->worker_list, &workers->idle_list);
		worker->idle = 1;
		workers->num_workers++;
		spin_unlock_irq(&workers->lock);
	}
	return 0;
fail:
	btrfs_stop_workers(workers);
	return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min = workers->num_workers < workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * Hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	atomic_inc(&worker->num_pending);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);
	spin_unlock_irqrestore(&workers->lock, flags);

	if (!worker) {
		spin_lock_irqsave(&workers->lock, flags);
		if (workers->num_workers >= workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			btrfs_start_workers(workers, 1);
			goto again;
		}
	}
	return worker;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
		  struct btrfs_worker_thread, worker_list);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return 0;
}

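/*
 * mark a work item as high priority before it is queued, so that it
 * lands on the prio_pending/prio_order lists and is serviced first
 */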
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	worker = find_worker(workers);
	if (workers->ordered) {
		spin_lock_irqsave(&workers->lock, flags);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock_irqrestore(&workers->lock, flags);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);

out:
	return 0;
}
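
/*
 * a sketch of submitting work (hypothetical names, assuming a pool set
 * up as in the btrfs_init_workers() example above):
 *
 *	static void demo_func(struct btrfs_work *work)
 *	{
 *		do_something();
 *		kfree(work);	(safe for a non-ordered pool, which never
 *				 touches the item again after func runs)
 *	}
 *
 *	work = kzalloc(sizeof(*work), GFP_NOFS);
 *	work->func = demo_func;
 *	btrfs_queue_worker(&workers, work);
 *
 * for an ordered pool (workers->ordered != 0), ordered_func and
 * ordered_free must also be set before queueing; the pool calls them
 * in queue order and ordered_free is what releases the item.
 */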