// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

/* Rate limiting */

static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
				bcache_flash_devs_sectors_dirty(c);
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);
	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy-- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_term_inverse value of 10000 means that 1/10000th
	 * of the error is accumulated in the integral term per second.
	 * This acts as a slow, long-term average that is not subject to
	 * variations in usage like the p term.
	 */
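	/*
	 * Illustrative example (made-up numbers, not taken from the code):
	 * with the default writeback_rate_p_term_inverse of 40, an error of
	 * 20000 dirty sectors over target gives a proportional term of
	 * 20000 / 40 = 500 sectors/sec, i.e. a rate that would retire the
	 * excess in 40 seconds if the error stayed constant.
	 */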
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
	dc->writeback_rate.rate = new_rate;
	dc->writeback_rate_target = target;
}

static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

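/*
 * Convert the number of sectors just written back into a delay (in
 * jiffies) from the writeback rate limiter; no throttling while the
 * device is detaching or when writeback_percent is zero, so dirty data
 * is then flushed as fast as possible.
 */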
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

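/*
 * State for one in-flight writeback: the data is read from the cache and
 * written to the backing device through the embedded bio; the sequence
 * number keeps the writes ordered.
 */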
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	uint16_t		sequence;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

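		/*
		 * Pin the buckets this key points at so they cannot be
		 * reclaimed and reused while the btree insert is in flight.
		 */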
		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

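/*
 * Writes are issued in sequence order: if it is not this IO's turn yet,
 * park on writeback_ordering_wait until the preceding write advances
 * writeback_sequence_next.
 */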
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case: the sequence advanced in an
			 * indeterminate order relative to when we were
			 * added to the wait list, so the wakeup may have
			 * been missed; wake the waiters again.
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

	next_sequence = io->sequence + 1;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device.  Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;

		closure_bio_submit(&io->bio, cl);
	}

	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;

	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() && next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;

			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;

			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command queueing.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(sizeof(struct dirty_io) +
				     sizeof(struct bio_vec) *
				     DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private	= io;
			io->dc		= dc;
			io->sequence    = sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio,
				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
			io->bio.bi_end_io	= read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);

			down(&dc->in_flight);

			/* We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

		delay = writeback_delay(dc, size);

		/* If the control system would wait for at least half a
		 * second, and no requests have been hitting the backing disk
		 * for a while: use an alternate mode where we have at most
		 * one contiguous set of writebacks in flight at a time.  If
		 * someone wants to do IO it will be quick, as it will only
		 * have to contend with one operation in flight, and we'll
		 * be round-tripping data to the backing disk as quickly as
		 * it can accept it.
		 */
		if (delay >= HZ / 2) {
			/* 3 means at least 1.5 seconds, up to 7.5 if we
			 * have slowed way down.
			 */
			if (atomic_inc_return(&dc->backing_idle) >= 3) {
				/* Wait for current I/Os to finish */
				closure_sync(&cl);
				/* And immediately launch a new set. */
				delay = 0;
			}
		}

		while (!kthread_should_stop() && delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

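	/*
	 * Walk each stripe the range touches, adjusting its dirty-sector
	 * count (nr_sectors may be negative) and keeping the
	 * full_dirty_stripes bitmap in sync.
	 */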
	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

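/*
 * When partial stripe writes are expensive on the backing device (e.g.
 * a parity RAID), writeback prefers stripes that are already entirely
 * dirty: scan the full_dirty_stripes bitmap, wrapping around the device
 * at most once, and refill the keybuf from those stripes only.
 */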
static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

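/*
 * Main writeback loop: sleep while there is no dirty data or writeback
 * is disabled, otherwise refill the keybuf with dirty keys and write
 * them back. Once a full index scan finds nothing left dirty, the
 * backing device is marked clean in its superblock.
 */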
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	return 0;
}

/* Init */

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

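/*
 * Walk the btree once and rebuild this device's per-stripe dirty-sector
 * counts from the keys still marked dirty (used when a device is
 * brought up).
 */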
void bch_sectors_dirty_init(struct bcache_device *d)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;

	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;
	dc->writeback_rate_minimum	= 8;

	dc->writeback_rate_update_seconds = 5;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}