/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is -1; we skip past it for stop_when_cache_set_failed */
const char * const bch_stop_on_failure_modes[] = {
	"default",
	"auto",
	"always",
	NULL
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

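/* Btree nodes are capped at 256kB: with a 4kB page size that is 64 pages. */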
#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
#define BCACHE_MINORS		128
/* limitation of bcache devices number on single system */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)
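/*
 * With the usual MINORBITS of 20, this works out to (1 << 20) / 128 = 8192
 * bcache devices per system, each with up to BCACHE_MINORS partitions.
 */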

/* Superblock */

static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset		= le64_to_cpu(s->offset);
	sb->version		= le64_to_cpu(s->version);

	memcpy(sb->magic,	s->magic, 16);
	memcpy(sb->uuid,	s->uuid, 16);
	memcpy(sb->set_uuid,	s->set_uuid, 16);
	memcpy(sb->label,	s->label, SB_LABEL_SIZE);

	sb->flags		= le64_to_cpu(s->flags);
	sb->seq			= le64_to_cpu(s->seq);
	sb->last_mount		= le32_to_cpu(s->last_mount);
	sb->first_bucket	= le16_to_cpu(s->first_bucket);
	sb->keys		= le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size	= le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset	= BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset	= le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets	= le64_to_cpu(s->nbuckets);
		sb->bucket_size	= le16_to_cpu(s->bucket_size);

		sb->nr_in_set	= le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev	= le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

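		/*
		 * The superblock lives at SB_SECTOR and spans SB_SIZE bytes,
		 * i.e. within the first 16 sectors of the device, so bucket
		 * data must not begin before sector 16.
		 */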
		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}

static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio_first_page_all(bio));
	unsigned i;

	bio->bi_iter.bi_sector	= SB_SECTOR;
	bio->bi_iter.bi_size	= SB_SIZE;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch_bio_map(bio, NULL);

	out->offset		= cpu_to_le64(sb->offset);
	out->version		= cpu_to_le64(sb->version);

	memcpy(out->uuid,	sb->uuid, 16);
	memcpy(out->set_uuid,	sb->set_uuid, 16);
	memcpy(out->label,	sb->label, SB_LABEL_SIZE);

	out->flags		= cpu_to_le64(sb->flags);
	out->seq		= cpu_to_le64(sb->seq);

	out->last_mount		= cpu_to_le32(sb->last_mount);
	out->first_bucket	= cpu_to_le16(sb->first_bucket);
	out->keys		= cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_reset(bio);
	bio_set_dev(bio, dc->bdev);
	bio->bi_end_io	= write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	/* I/O request sent to backing device */
	__write_super(&dc->sb, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	/* is_read = 0 */
	bch_count_io_errors(ca, bio->bi_status, 0,
			    "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version		= BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq		= c->sb.seq;
		ca->sb.last_mount	= c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio_set_dev(bio, ca->bdev);
		bio->bi_end_io	= write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_status, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io	= uuid_endio;
		bio->bi_private = cl;
		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (op != REQ_OP_WRITE)
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, REQ_OP_READ, 0, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0	*u0 = (void *) c->uuids;
		struct uuid_entry	*u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid,	u0[i].uuid, 16);
			memcpy(u1[i].label,	u0[i].label, 32);

			u1[i].first_reg		= u0[i].first_reg;
			u1[i].last_reg		= u0[i].last_reg;
			u1[i].invalidated	= u0[i].invalidated;

			u1[i].flags	= 0;
			u1[i].sectors	= 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	closure_init_stack(&cl);

	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 * bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */
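
/*
 * Illustrative sketch of the on-disk chain described above (see
 * bch_prio_write() and prio_read() below): the journal header's
 * prio_bucket[] entry points at the first prio bucket, and each
 * bucket's prio_set chains to the next via next_bucket:
 *
 *   journal header -> { csum, magic, seq, next_bucket, data[] }
 *                                             |
 *                                             v
 *                     { csum, magic, seq, next_bucket, data[] } -> ...
 *
 * Each data[] entry is a struct bucket_disk holding one bucket's
 * {prio, gen} pair; prios_per_bucket(ca) entries fit in one bucket.
 */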

static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
		    unsigned long op_flags)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size	= bucket_bytes(ca);

	bio->bi_end_io	= prio_endio;
	bio->bi_private = ca;
	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(ca->set, bio, &ca->prio);
	closure_sync(cl);
}

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket	= ca->prio_buckets[i + 1];
		p->magic	= pset_magic(&ca->sb);
		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_OP_WRITE, 0);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
}

static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, REQ_OP_READ, 0);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(&ca->sb))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;
	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;
	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;
	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		unsigned i;
		struct cache *ca;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		for_each_cache(ca, d->c, i)
			bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned i;
	struct cache *ca;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");

	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	d->id = id;
	d->c = c;
	c->devices[id] = d;

	if (id >= c->devices_max_used)
		c->devices_max_used = id + 1;

	closure_get(&c->caching);
}

static inline int first_minor_to_idx(int first_minor)
{
	return (first_minor/BCACHE_MINORS);
}

static inline int idx_to_first_minor(int idx)
{
	return (idx * BCACHE_MINORS);
}

static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);
	if (d->disk && d->disk->flags & GENHD_FL_UP)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk) {
		ida_simple_remove(&bcache_device_idx,
				  first_minor_to_idx(d->disk->first_minor));
		put_disk(d->disk);
	}

	if (d->bio_split)
		bioset_free(d->bio_split);
	kvfree(d->full_dirty_stripes);
	kvfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

static int bcache_device_init(struct bcache_device *d, unsigned block_size,
			      sector_t sectors)
{
	struct request_queue *q;
	const size_t max_stripes = min_t(size_t, INT_MAX,
					 SIZE_MAX / sizeof(atomic_t));
	size_t n;
	int idx;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;

	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

	if (!d->nr_stripes || d->nr_stripes > max_stripes) {
		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
			(unsigned)d->nr_stripes);
		return -ENOMEM;
	}

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
	if (!d->full_dirty_stripes)
		return -ENOMEM;

	idx = ida_simple_get(&bcache_device_idx, 0,
				BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
	if (idx < 0)
		return idx;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
					   BIOSET_NEED_BVECS |
					   BIOSET_NEED_RESCUER)) ||
	    !(d->disk = alloc_disk(BCACHE_MINORS))) {
		ida_simple_remove(&bcache_device_idx, idx);
		return -ENOMEM;
	}

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= idx_to_first_minor(idx);
	d->disk->fops		= &bcache_ops;
	d->disk->private_data	= d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue			= q;
	q->queuedata			= d;
	q->backing_dev_info->congested_data = d;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_PAGES;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	q->limits.discard_granularity	= 512;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);

	blk_queue_write_cache(q, true, true);

	return 0;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;
	char buf[SB_LABEL_SIZE + 1];
	char *env[] = {
		"DRIVER=bcache",
		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
		NULL,
		NULL,
	};

	memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
	buf[SB_LABEL_SIZE] = '\0';
	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

	if (atomic_xchg(&dc->running, 1)) {
		kfree(env[1]);
		kfree(env[2]);
		return;
	}

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
	/*
	 * Won't show up in the uevent file; use udevadm monitor -e instead.
	 * Only class / kset properties are persistent.
	 */
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
	kfree(env[1]);
	kfree(env[2]);

	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

/*
 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed
 * work dc->writeback_rate_update is running. Wait until the routine
 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out
 * seconds, give up waiting here and continue to cancel it too.
 */
static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
{
	int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;

	do {
		if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
			      &dc->disk.flags))
			break;
		time_out--;
		schedule_timeout_interruptible(1);
	} while (time_out > 0);

	if (time_out == 0)
		pr_warn("give up waiting for dc->writeback_write_update to quit");

	cancel_delayed_work_sync(&dc->writeback_rate_update);
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
	BUG_ON(refcount_read(&dc->count));

	mutex_lock(&bch_register_lock);

	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
		cancel_writeback_rate_update_dwork(dc);

	if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
		kthread_stop(dc->writeback_thread);
		dc->writeback_thread = NULL;
	}

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
	clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", dc->backing_dev_name);

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return;

	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);

	cached_dev_put(dc);
}

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	struct cached_dev *exist_dc, *t;

	if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
	    (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	/* Check whether already attached */
	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
			pr_err("Tried to attach %s but duplicate UUID already attached",
				dc->backing_dev_name);

			return -EINVAL;
		}
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set",
			       dc->backing_dev_name);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID",
			       dc->backing_dev_name);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	 */

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	refcount_set(&dc->count, 1);

	/* Block writeback thread, but spawn it */
	down_write(&dc->writeback_lock);
	if (bch_cached_dev_writeback_start(dc)) {
		up_write(&dc->writeback_lock);
		return -ENOMEM;
	}

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		bch_sectors_dirty_init(&dc->disk);
		atomic_set(&dc->has_dirty, 1);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);
	bcache_device_link(&dc->disk, c, "bdev");

	/* Allow the writeback thread to proceed */
	up_write(&dc->writeback_lock);

	pr_info("Caching %s as %s on set %pU",
		dc->backing_dev_name,
		dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	mutex_lock(&bch_register_lock);

	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
		cancel_writeback_rate_update_dwork(dc);

	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		kthread_stop(dc->writeback_thread);
	if (dc->writeback_write_wq)
		destroy_workqueue(dc->writeback_write_wq);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev))
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	sema_init(&dc->sb_write_mutex, 1);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_cutoff		= 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	dc->disk.stripe_size = q->limits.io_opt >> 9;

	if (dc->disk.stripe_size)
		dc->partial_stripes_expensive =
			q->limits.raid_partial_stripes_expensive;

	ret = bcache_device_init(&dc->disk, block_size,
			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
	if (ret)
		return ret;

	dc->disk.disk->queue->backing_dev_info->ra_pages =
		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
		    q->backing_dev_info->ra_pages);

	atomic_set(&dc->io_errors, 0);
	dc->io_disable = false;
	dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
	/* default to auto */
	dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
				 struct block_device *bdev,
				 struct cached_dev *dc)
{
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	bdevname(bdev, dc->backing_dev_name);
	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
	bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
	get_page(sb_page);

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", dc->backing_dev_name);

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c, NULL);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return;
err:
	pr_notice("error %s: %s", dc->backing_dev_name, err);
	bcache_device_stop(&dc->disk);
}

/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	mutex_lock(&bch_register_lock);
	bcache_device_free(d);
	mutex_unlock(&bch_register_lock);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c), u->sectors))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	bch_sectors_dirty_init(d);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
		return -EPERM;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

bool bch_cached_dev_error(struct cached_dev *dc)
{
	struct cache_set *c;

	if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return false;

	dc->io_disable = true;
	/* make others know io_disable is true earlier */
	smp_mb();

	pr_err("stop %s: too many IO errors on backing device %s\n",
		dc->disk.disk->disk_name, dc->backing_dev_name);

	/*
	 * If the cached device is still attached to a cache set,
	 * even dc->io_disable is true and no more I/O requests
	 * accepted, cache device internal I/O (writeback scan or
	 * garbage collection) may still prevent bcache device from
	 * being stopped. So here CACHE_SET_IO_DISABLE should be
	 * set to c->flags too, to make the internal I/O to cache
	 * device rejected and stopped immediately.
	 * If c is NULL, that means the bcache device is not attached
	 * to any cache set, then no CACHE_SET_IO_DISABLE bit to set.
	 */
	c = dc->disk.c;
	if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
		pr_info("CACHE_SET_IO_DISABLE already set");

	bcache_device_stop(&dc->disk);
	return true;
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (c->on_error != ON_ERROR_PANIC &&
	    test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
		pr_info("CACHE_SET_IO_DISABLE already set");

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	if (c->on_error == ON_ERROR_PANIC)
		panic("panic forced after error\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca) {
			ca->set = NULL;
			c->cache[ca->sb.nr_this_dev] = NULL;
			kobject_put(&ca->kobj);
		}

	bch_bset_sort_state_free(&c->sort);
	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));

	if (c->moving_gc_wq)
		destroy_workqueue(c->moving_gc_wq);
	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->fill_iter)
		mempool_destroy(c->fill_iter);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cache *ca;
	struct btree *b;
	unsigned i;

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (c->gc_thread)
		kthread_stop(c->gc_thread);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list) {
		mutex_lock(&b->write_lock);
		if (btree_node_dirty(b))
			__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}

	for_each_cache(ca, c, i)
		if (ca->alloc_thread)
			kthread_stop(ca->alloc_thread);

	if (c->journal.cur) {
		cancel_delayed_work_sync(&c->journal.work);
		/* flush last journal entry if needed */
		c->journal.work.work.func(&c->journal.work.work);
	}

	closure_return(cl);
}

/*
 * This function is only called when CACHE_SET_IO_DISABLE is set, which means
 * cache set is unregistering due to too many I/O errors. In this condition,
 * the bcache device might be stopped, it depends on stop_when_cache_set_failed
 * value and whether the broken cache has dirty data:
 *
 * dc->stop_when_cache_set_failed    dc->has_dirty   stop bcache device
 *  BCH_CACHED_STOP_AUTO               0               NO
 *  BCH_CACHED_STOP_AUTO               1               YES
 *  BCH_CACHED_DEV_STOP_ALWAYS         0               YES
 *  BCH_CACHED_DEV_STOP_ALWAYS         1               YES
 *
 * The expected behavior is, if stop_when_cache_set_failed is configured to
 * "auto" via sysfs interface, the bcache device will not be stopped if the
 * backing device is clean on the broken cache device.
 */
static void conditional_stop_bcache_device(struct cache_set *c,
					   struct bcache_device *d,
					   struct cached_dev *dc)
{
	if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
		pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.",
			d->disk->disk_name, c->sb.set_uuid);
		bcache_device_stop(d);
	} else if (atomic_read(&dc->has_dirty)) {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
		 * and dc->has_dirty == 1
		 */
		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
			d->disk->disk_name);
		/*
		 * There might be a small time gap in which the cache set
		 * is released but the bcache device is not. During this
		 * gap, regular I/O requests go directly to the backing
		 * device, since no cache set is attached. In writeback
		 * mode this can also leave inconsistent data behind while
		 * the cache is dirty. Therefore, before calling
		 * bcache_device_stop() for a broken cache device,
		 * dc->io_disable should be explicitly set to true.
		 */
		dc->io_disable = true;
		/* make others know io_disable is true earlier */
		smp_mb();
		bcache_device_stop(d);
	} else {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
		 * and dc->has_dirty == 0
		 */
		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.",
			d->disk->disk_name);
	}
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc;
	struct bcache_device *d;
	size_t i;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++) {
		d = c->devices[i];
		if (!d)
			continue;

		if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
		    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
			dc = container_of(d, struct cached_dev, disk);
			bch_cached_dev_detach(dc);
			if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
				conditional_stop_bcache_device(c, d, dc);
		} else {
			bcache_device_stop(d);
		}
	}

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size	= sb->block_size;
	c->sb.bucket_size	= sb->bucket_size;
	c->sb.nr_in_set		= sb->nr_in_set;
	c->sb.last_mount	= sb->last_mount;
	c->bucket_bits		= ilog2(sb->bucket_size);
	c->block_bits		= ilog2(sb->block_size);
	c->nr_uuids		= bucket_bytes(c) / sizeof(struct uuid_entry);
	c->devices_max_used	= 0;
	c->btree_pages		= bucket_pages(c);
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->btree_cache_wait);
	init_waitqueue_head(&c->bucket_wait);
	init_waitqueue_head(&c->gc_wait);
	sema_init(&c->uuid_write_mutex, 1);

	spin_lock_init(&c->btree_gc_time.lock);
	spin_lock_init(&c->btree_split_time.lock);
	spin_lock_init(&c->btree_read_time.lock);

	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio),
					   BIOSET_NEED_BVECS |
					   BIOSET_NEED_RESCUER)) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
						WQ_MEM_RECLAIM, 0)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c) ||
	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
		goto err;

	c->congested_read_threshold_us	= 2000;
	c->congested_write_threshold_us	= 20000;
	c->error_limit	= DEFAULT_IO_ERROR_LIMIT;
	WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	struct closure cl;
	unsigned i;

	closure_init_stack(&cl);

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;
	set_gc_sectors(c);

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error() and we'll
		 * tear everything down right away; if we checked for the
		 * failure sooner we could avoid the journal replay below.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_btree_ptr_invalid(c, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, NULL, k,
					     j->btree_level, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c))
			goto err;

		bch_journal_mark(c, &journal);
		bch_initial_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bch_journal_next() can't happen sooner, or
		 * bch_initial_gc_finish() will give spurious errors about
		 * last_gc > gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal);
	} else {
		pr_notice("invalidating existing data");

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_initial_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		mutex_lock(&c->root->write_lock);
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c, NULL);

	flash_devs_run(c);

	set_bit(CACHE_SET_RUNNING, &c->flags);
	return;
err:
	closure_sync(&cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, "%s", err);
}

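/*
 * A cache can only join a set whose superblock agrees with its own on the
 * basic geometry: block size, bucket size and number of members.
 */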
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size	== c->sb.block_size &&
		ca->sb.bucket_size	== c->sb.bucket_size &&
		ca->sb.nr_in_set	== c->sb.nr_in_set;
}

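/*
 * Find the cache set this cache belongs to by set_uuid, or allocate a new
 * one if this is the first member seen; once all nr_in_set members have
 * registered, run_cache_set() brings the whole set online. Returns NULL on
 * success, or an error string for sysfs to report.
 */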
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version		= ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags             = ca->sb.flags;
		c->sb.seq		= ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

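/*
 * Final kobject release for a cache: undoes cache_alloc() and
 * register_cache(), freeing the bucket arrays, fifos and superblock page
 * and dropping the exclusive block device and module references.
 */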
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(bio_first_page_all(&ca->sb_bio));

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}

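/*
 * Allocate the in-memory state for a cache: the per-reserve free-bucket
 * fifos, the free_inc fifo and heap used when invalidating buckets, the
 * in-memory bucket array, and the buffers used to read and write bucket
 * priorities.
 */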
static int cache_alloc(struct cache *ca)
{
	size_t free;
	size_t btree_buckets;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	/*
	 * When ca->sb.njournal_buckets is non-zero a journal exists, and
	 * replaying it in bch_journal_replay() may split btree nodes, which
	 * requires buckets from the RESERVE_BTREE reserve. In the worst case
	 * every journal bucket holds valid entries and every key must be
	 * replayed, so reserve as many RESERVE_BTREE buckets as there are
	 * journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc,	free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap,	free << 3, GFP_KERNEL) ||
	    !(ca->buckets	= vzalloc(sizeof(struct bucket) *
					  ca->sb.nbuckets)) ||
	    !(ca->prio_buckets	= kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					  2, GFP_KERNEL)) ||
	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	return 0;
}

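/*
 * Tie a freshly read superblock to its struct cache and block device, then
 * try to attach the cache to its set under bch_register_lock. On failure
 * an error message is logged and a negative errno is returned.
 */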
static int register_cache(struct cache_sb *sb, struct page *sb_page,
				struct block_device *bdev, struct cache *ca)
{
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, ca->cache_dev_name);
	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", ca->cache_dev_name);

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error %s: %s", ca->cache_dev_name, err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);

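/*
 * Helpers for register_bcache() to distinguish "busy because the device is
 * already registered with bcache" from "busy for some other reason"; called
 * with bch_register_lock held.
 */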
static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

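/*
 * Handle a write to /sys/fs/bcache/register (or register_quiet): open the
 * named block device exclusively, read its superblock, and register it as
 * either a backing device (SB_IS_BDEV) or a cache device.
 */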
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto out;
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	err = "failed to register device";
	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
		if (!dc)
			goto err_close;

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca)
			goto err_close;

		if (register_cache(sb, sb_page, bdev, ca) != 0)
			goto err;
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	pr_info("error %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

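/*
 * On shutdown, flush everything: stop all cache sets and backing devices,
 * then wait (bounded to roughly two seconds) for them to finish closing
 * before the reboot proceeds.
 */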
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

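/* Tear down in the reverse order of bcache_init() */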
static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

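/*
 * Module init: register the reboot notifier and block major first, then
 * the workqueue, sysfs objects and debugfs hooks; any failure unwinds via
 * bcache_exit().
 */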
static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    bch_request_init() ||
	    bch_debug_init(bcache_kobj) || closure_debug_init() ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);