// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned int len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);
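
	/*
	 * Read the bucket in chunks of at most PAGE_SECTORS << JSET_BITS
	 * sectors (the size of the journal write buffer), parsing each
	 * jset we find; if a set straddles the end of a chunk we go back
	 * and re-read starting at that set.
	 */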
	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}
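
			/*
			 * The set continues past the end of the buffer we
			 * read into; offset has already been advanced to
			 * this set, so re-read starting from here.
			 */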
			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));
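
			/*
			 * Drop entries at the front of the list that are
			 * older than this set's last_seq (they would never
			 * be replayed), then walk the list backwards to find
			 * where to insert this set so it stays sorted by
			 * sequence number, skipping duplicates and sets that
			 * are already obsolete.
			 */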
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
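/*
 * read_bucket() evaluates to 1 if the bucket contained journal entries
 * and 0 if not, marks the bucket as checked in the bitmap, and returns
 * from bch_journal_read() on an error.
 */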
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned int iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
		unsigned int i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			/*
			 * We must try index 0 first for correctness: the
			 * journal buckets form a circular buffer which might
			 * have wrapped around.
			 */
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
					    l + 1))
			if (read_bucket(l))
				goto bsearch;

		/* no journal entries on this device? */
		if (l == ca->sb.njournal_buckets)
			continue;
bsearch:
		BUG_ON(list_empty(list));

		/*
		 * Binary search for the last bucket that still adds newer
		 * entries to the list: l is known to contain journal
		 * entries, and reading a bucket with newer entries changes
		 * the sequence number at the tail of the list.
		 */
		m = l;
		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;

			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned int j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

/* Journalling */
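/*
 * Order btree nodes by the age of the journal entry they reference: a
 * smaller index into the journal pin fifo means an older entry.
 */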
#define journal_max_cmp(l, r) \
	(fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
#define journal_min_cmp(l, r) \
	(fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))

static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry and write it out, so that journal entry can be reclaimed:
	 */
	struct btree *b;
	int i;

	atomic_long_inc(&c->flush_write);

retry:
	spin_lock(&c->journal.lock);
	if (heap_empty(&c->flush_btree)) {
		for_each_cached_btree(b, c, i)
			if (btree_current_write(b)->journal) {
				if (!heap_full(&c->flush_btree))
					heap_add(&c->flush_btree, b,
						 journal_max_cmp);
				else if (journal_max_cmp(b,
					 heap_peek(&c->flush_btree))) {
					c->flush_btree.data[0] = b;
					heap_sift(&c->flush_btree, 0,
						  journal_max_cmp);
				}
			}

		for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
			heap_sift(&c->flush_btree, i, journal_min_cmp);
	}

	b = NULL;
	heap_pop(&c->flush_btree, b, journal_min_cmp);
	spin_unlock(&c->journal.lock);

	if (b) {
		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			/* We raced */
			atomic_long_inc(&c->retry_flush_write);
			goto retry;
		}

		__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}
}

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}

static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
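	/*
	 * Only one discard is issued at a time; discard_idx is advanced
	 * once the previous discard has completed.
	 */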
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}

static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned int iter, n = 0;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

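	/*
	 * Pop journal pins that have been fully released; the entries they
	 * pinned no longer need to be kept in the journal.
	 */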
	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = MAKE_PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
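	/*
	 * bch_journal_next() has already switched journal.cur, so this is
	 * the write that just completed:
	 */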
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}

static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
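		/*
		 * Out of journal space: reclaim what we can, flush dirty
		 * btree nodes that are holding old journal entries pinned,
		 * then retry from journal_write().
		 */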
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size = sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

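	/*
	 * Only one journal write may be in flight at a time; if one is
	 * already outstanding, journal_write_done() will continue at
	 * journal_write() and pick up this entry.
	 */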
	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}

static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned int nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
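		/*
		 * Loop until the currently open journal entry has room for
		 * nkeys: if it doesn't, either submit it so the next entry
		 * becomes current, or (if the journal is completely full)
		 * reclaim space and flush btree nodes, then wait and retry.
		 */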
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

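	/*
	 * If the caller passed a closure it wants to wait for the write, so
	 * submit the journal entry now; otherwise mark it dirty and let the
	 * delayed work flush it after journal_delay_ms.
	 */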
	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
	free_heap(&c->flush_btree);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
	    !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}