// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				bcache_flash_devs_sectors_dirty(c);

	/*
	 * Unfortunately there is no control of global dirty data.  If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try and ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
				c->cached_dev_sectors);
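	/*
	 * E.g. a backing device holding 1/5th of cached_dev_sectors gets
	 * a bdev_share of roughly 1/5th, expressed in fixed point with
	 * WRITEBACK_SHARE_SHIFT fractional bits; the return below scales
	 * the overall dirty target by that share.
	 */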

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}

static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
	 * of the error is accumulated in the integral term per second.
	 * This acts as a slow, long-term average that is not subject to
	 * variations in usage like the p term.
	 */
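	/*
	 * Worked example with the defaults above: at 40000 sectors over
	 * target, the proportional term asks for 40000 / 40 = 1000
	 * sectors/sec, while the integral term accumulates 40000 / 10000
	 * = 4 sectors/sec of additional rate for each second the error
	 * persists.
	 */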
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}
	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);
	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);
	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
	dc->writeback_rate.rate = new_rate;
	dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
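	/* ticket used to keep writeback bios in order; see write_dirty() */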
	uint16_t		sequence;
	struct bio		bio;
};
static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
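	/*
	 * One inline bio_vec per page of dirty data; KEY_SIZE() is in
	 * 512b sectors.
	 */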
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);
		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case: our sequence number may have come up
			 * while we were being added to the wait list, in
			 * which case nobody else will wake us; wake the
			 * list ourselves.
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

	next_sequence = io->sequence + 1;
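	/*
	 * We hold the current ticket; next_sequence is published to
	 * writeback_sequence_next only after our write is submitted below.
	 */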
	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device.  Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;
		closure_bio_submit(&io->bio, cl);
	}
	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	/* is_read = 1 */
	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, 1,
			    "reading dirty data from cache");
	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl);
	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;
	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() && next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;

			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;

			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command queueing.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(sizeof(struct dirty_io) +
				     sizeof(struct bio_vec) *
				     DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private	= io;
			io->dc		= dc;
			io->sequence    = sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio,
				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
			io->bio.bi_end_io	= read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);

			down(&dc->in_flight);

			/* We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

		delay = writeback_delay(dc, size);

		/* If the control system would wait for at least half a
		 * second, and there's been no reqs hitting the backing disk
		 * for a while: use an alternate mode where we have at most
		 * one contiguous set of writebacks in flight at a time.  If
		 * someone wants to do IO it will be quick, as it will only
		 * have to contend with one operation in flight, and we'll
		 * be round-tripping data to the backing disk as quickly as
		 * it can accept it.
		 */
		if (delay >= HZ / 2) {
			/* 3 means at least 1.5 seconds, up to 7.5 if we
			 * have slowed way down.
			 */
			if (atomic_inc_return(&dc->backing_idle) >= 3) {
				/* Wait for current I/Os to finish */
				closure_sync(&cl);
				/* And immediately launch a new set. */
				delay = 0;
			}
		}

		while (!kthread_should_stop() && delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);
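	/*
	 * Walk [offset, offset + nr_sectors) one stripe at a time; e.g.
	 * with a stripe_size of 2048 sectors, offset 5000 lands in stripe
	 * 2 at stripe_offset 904 (the mask assumes stripe_size is a power
	 * of two).
	 */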

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
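	/* the scan below resumes from wherever the keybuf last stopped */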
	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);
		if (stripe == dc->disk.nr_stripes)
			goto next;
		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&dc->has_dirty) ||
		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				return 0;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	return 0;
}

/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;
	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;

	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;
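	/*
	 * Rates are in sectors per second; 1024 is an initial 512KiB/s,
	 * and the floor of 8 below keeps writeback trickling even when
	 * the PI controller asks for less.
	 */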
	dc->writeback_rate_minimum	= 8;
	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;
	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}