// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				bcache_flash_devs_sectors_dirty(c);

	/*
	 * Unfortunately there is no control of global dirty data.  If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try and ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
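	/*
	 * Note that bdev_share is a fixed-point fraction scaled up by
	 * 2^WRITEBACK_SHARE_SHIFT to keep precision in integer math; the
	 * scaling is undone by the shift in the return statement below.
	 */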
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
				c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}

static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
	 * of the error is accumulated in the integral term per second.
	 * This acts as a slow, long-term average that is not subject to
	 * variations in usage like the p term.
	 */
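	/*
	 * Illustrative example (numbers are hypothetical, not from the
	 * original source): with the default p_term_inverse of 40, being
	 * 40000 sectors over target contributes 40000 / 40 = 1000
	 * sectors/sec to the rate, a pace that would retire the excess
	 * in 40 seconds if nothing else changed.
	 */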
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

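	/*
	 * The rate is in sectors/sec; NSEC_PER_SEC serves only as a large
	 * numeric ceiling for the clamp, not as a time value.
	 */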
	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
	dc->writeback_rate.rate = new_rate;
	dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb();

	if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) {
		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
		smp_mb();
		return;
	}

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) {
		schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
	}

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb();
}

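/*
 * Returns how long to sleep (in jiffies) before issuing the next batch of
 * writeback I/O, as dictated by the PI-controlled rate.
 */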
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

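/*
 * State for a single in-flight writeback I/O: the bio is used first to
 * read the dirty data from the cache and is then reinitialized for the
 * write to the backing device; 'sequence' lets write_dirty() issue writes
 * to the backing device in dispatch order.
 */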
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	uint16_t		sequence;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case-- it happened in indeterminate order
			 * relative to when we were added to the wait list.
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

	next_sequence = io->sequence + 1;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device.  Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;

		closure_bio_submit(&io->bio, cl);
	}

	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	/* is_read = 1 */
	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, 1,
			    "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;

	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() && next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;

			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;

			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command queueing.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(sizeof(struct dirty_io) +
				     sizeof(struct bio_vec) *
				     DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private	= io;
			io->dc		= dc;
			io->sequence    = sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio,
				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
			io->bio.bi_end_io	= read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);

			down(&dc->in_flight);

			/* We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

		delay = writeback_delay(dc, size);

		/* If the control system would wait for at least half a
		 * second, and there have been no requests hitting the
		 * backing disk for a while: use an alternate mode where we
		 * have at most one contiguous set of writebacks in flight
		 * at a time.  If someone wants to do IO it will be quick,
		 * as it will only have to contend with one operation in
		 * flight, and we'll be round-tripping data to the backing
		 * disk as quickly as it can accept it.
		 */
		if (delay >= HZ / 2) {
			/* 3 means at least 1.5 seconds, up to 7.5 if we
			 * have slowed way down.
			 */
			if (atomic_inc_return(&dc->backing_idle) >= 3) {
				/* Wait for current I/Os to finish */
				closure_sync(&cl);
				/* And immediately launch a new set. */
				delay = 0;
			}
		}

		while (!kthread_should_stop() && delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

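/*
 * Update the per-stripe dirty sector counts for device 'inode', setting a
 * stripe's bit in full_dirty_stripes when the stripe becomes completely
 * dirty and clearing it as the stripe drains.
 */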
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

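/*
 * When partial stripe writes are expensive (e.g. on a RAID backing
 * device), prefer writing back stripes that are completely dirty: scan
 * the full_dirty_stripes bitmap, wrapping around the disk once, and
 * refill the keybuf from those ranges first.
 */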
static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * Make sure the keybuf position is inside the range for this disk:
	 * at bringup we might not be attached yet, so this disk's inode
	 * number isn't initialized.
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

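/*
 * Main writeback loop: refill the keybuf with dirty keys, write them back
 * via read_dirty(), and sleep between passes; the backing device is
 * marked clean once a full index scan turns up no remaining dirty data.
 */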
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * If the bcache device is detaching, skip here and continue
		 * to perform writeback. Otherwise, if there is no dirty data
		 * on the cache, or there is dirty data but writeback is
		 * disabled, the writeback thread should sleep here and wait
		 * for others to wake it up.
		 */
		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
			/*
			 * If bcache device is detaching via sysfs interface,
			 * writeback thread should stop after there is no dirty
			 * data on cache. BCACHE_DEV_DETACHING flag is set in
			 * bch_cached_dev_detach().
			 */
			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				break;
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	dc->writeback_thread = NULL;
	cached_dev_put(dc);

	return 0;
}

/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

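/*
 * Walk the btree at device bringup to rebuild the in-memory per-stripe
 * dirty counts from the keys already on disk.
 */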
void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;

	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

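	/* Defaults; most of these can be tuned at runtime via sysfs */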
	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;
	dc->writeback_rate_minimum	= 8;

	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	cached_dev_get(dc);
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread)) {
		cached_dev_put(dc);
		return PTR_ERR(dc->writeback_thread);
	}

	WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}