// SPDX-License-Identifier: GPL-2.0
/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

unsigned int bch_cutoff_writeback;
unsigned int bch_cutoff_writeback_sync;

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
bool bcache_is_reboot;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
struct workqueue_struct *bch_journal_wq;


#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
#define BCACHE_MINORS		128
/* limitation of bcache devices number on single system */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)

/* Superblock */

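/*
 * Read the superblock page at SB_OFFSET from the device, convert the
 * little-endian struct cache_sb_disk fields into the in-memory
 * struct cache_sb, and validate offset, magic, checksum and geometry.
 * Returns NULL on success (handing the raw superblock back via @res)
 * or an error string; the page is only dropped on error.
 */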
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct cache_sb_disk **res)
{
	const char *err;
	struct cache_sb_disk *s;
	struct page *page;
	unsigned int i;

	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
	if (IS_ERR(page))
		return "IO error";
	s = page_address(page) + offset_in_page(SB_OFFSET);

	sb->offset		= le64_to_cpu(s->offset);
	sb->version		= le64_to_cpu(s->version);

	memcpy(sb->magic,	s->magic, 16);
	memcpy(sb->uuid,	s->uuid, 16);
	memcpy(sb->set_uuid,	s->set_uuid, 16);
	memcpy(sb->label,	s->label, SB_LABEL_SIZE);

	sb->flags		= le64_to_cpu(s->flags);
	sb->seq			= le64_to_cpu(s->seq);
	sb->last_mount		= le32_to_cpu(s->last_mount);
	sb->first_bucket	= le16_to_cpu(s->first_bucket);
	sb->keys		= le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock (bad offset)";
	if (sb->offset != SB_SECTOR)
		goto err;

	err = "Not a bcache superblock (bad magic)";
	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size	= le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset	= BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset	= le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets	= le64_to_cpu(s->nbuckets);
		sb->bucket_size	= le16_to_cpu(s->bucket_size);

		sb->nr_in_set	= le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev	= le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) <
		    sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = (u32)ktime_get_real_seconds();
	*res = s;
	return NULL;
err:
	put_page(page);
	return err;
}

static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;

	if (bio->bi_status)
		bch_count_backing_io_errors(dc, bio);

	closure_put(&dc->sb_write);
}

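/*
 * Fill in the on-disk superblock @out from the in-memory copy @sb,
 * recompute the checksum and submit @bio as a synchronous metadata
 * write to SB_SECTOR. The caller owns the bio and its completion.
 */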
static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
		struct bio *bio)
{
	unsigned int i;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
	bio->bi_iter.bi_sector	= SB_SECTOR;
	__bio_add_page(bio, virt_to_page(out), SB_SIZE,
			offset_in_page(out));

	out->offset		= cpu_to_le64(sb->offset);
	out->version		= cpu_to_le64(sb->version);

	memcpy(out->uuid,	sb->uuid, 16);
	memcpy(out->set_uuid,	sb->set_uuid, 16);
	memcpy(out->label,	sb->label, SB_LABEL_SIZE);

	out->flags		= cpu_to_le64(sb->flags);
	out->seq		= cpu_to_le64(sb->seq);

	out->last_mount		= cpu_to_le32(sb->last_mount);
	out->first_bucket	= cpu_to_le16(sb->first_bucket);
	out->keys		= cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_init(bio, dc->sb_bv, 1);
	bio_set_dev(bio, dc->bdev);
	bio->bi_end_io	= write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	/* I/O request sent to backing device */
	__write_super(&dc->sb, dc->sb_disk, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	/* is_read = 0 */
	bch_count_io_errors(ca, bio->bi_status, 0,
			    "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned int i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version		= BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq		= c->sb.seq;
		ca->sb.last_mount	= c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_init(bio, ca->sb_bv, 1);
		bio_set_dev(bio, ca->bdev);
		bio->bi_end_io	= write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, ca->sb_disk, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_status, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

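/*
 * Read or write the uuid_entry array for the cache set at the bucket(s)
 * pointed to by @k. Writes go to every pointer in the key so all
 * replicas stay in sync; a read only needs the first copy. Serialised
 * by uuid_write_mutex and completed through the uuid_write closure.
 */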
static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned int i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io	= uuid_endio;
		bio->bi_private = cl;
		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (op != REQ_OP_WRITE)
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, REQ_OP_READ, 0, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0	*u0 = (void *) c->uuids;
		struct uuid_entry	*u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid,	u0[i].uuid, 16);
			memcpy(u1[i].label,	u0[i].label, 32);

			u1[i].first_reg		= u0[i].first_reg;
			u1[i].last_reg		= u0[i].last_reg;
			u1[i].invalidated	= u0[i].invalidated;

			u1[i].flags	= 0;
			u1[i].sectors	= 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	struct cache *ca;

	closure_init_stack(&cl);
	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
	closure_sync(&cl);

	/* Only one bucket used for uuid write */
	ca = PTR_CACHE(c, &k.key, 0);
	atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";

	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *   8 bit gen
 *  16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 * bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
		    unsigned long op_flags)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size	= bucket_bytes(ca);

	bio->bi_end_io	= prio_endio;
	bio->bi_private = ca;
	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(ca->set, bio, &ca->prio);
	closure_sync(cl);
}

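/*
 * Write out the packed prio/gen array for every bucket on @ca: the
 * array is split across freshly allocated prio buckets chained together
 * via p->next_bucket, the update is journalled, and only then are the
 * buckets used by the previous generation freed for reuse.
 */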
int bch_prio_write(struct cache *ca, bool wait)
{
	int i;
	struct bucket *b;
	struct closure cl;

	pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu",
		 fifo_used(&ca->free[RESERVE_PRIO]),
		 fifo_used(&ca->free[RESERVE_NONE]),
		 fifo_used(&ca->free_inc));

	/*
	 * Pre-check if there are enough free buckets. In the non-blocking
	 * scenario it's better to fail early rather than starting to allocate
	 * buckets and do a cleanup later in case of failure.
	 */
	if (!wait) {
		size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
			       fifo_used(&ca->free[RESERVE_NONE]);
		if (prio_buckets(ca) > avail)
			return -ENOMEM;
	}

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket	= ca->prio_buckets[i + 1];
		p->magic	= pset_magic(&ca->sb);
		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_OP_WRITE, 0);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
	return 0;
}

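/*
 * Counterpart of bch_prio_write(), used when an existing cache device
 * is read back in: walk the on-disk chain of prio buckets starting at
 * @bucket, verify each bucket's checksum and magic, and restore prio
 * and gen for every bucket in memory. Returns 0 or -EIO on corruption.
 */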
static int prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned int bucket_nr = 0;
	int ret = -EIO;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, REQ_OP_READ, 0);

			if (p->csum !=
			    bch_crc64(&p->magic, bucket_bytes(ca) - 8)) {
				pr_warn("bad csum reading priorities");
				goto out;
			}

			if (p->magic != pset_magic(&ca->sb)) {
				pr_warn("bad magic reading priorities");
				goto out;
			}

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}

	ret = 0;
out:
	return ret;
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;

	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;

	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;

	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		/*
		 * closure_fn set to
		 * - cached device: cached_dev_flush()
		 * - flash dev: flash_dev_flush()
		 */
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		unsigned int i;
		struct cache *ca;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		for_each_cache(ca, d->c, i)
			bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned int i;
	struct cache *ca;
	int ret;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
	if (ret < 0)
		pr_err("Couldn't create device -> cache set symlink");

	ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
	if (ret < 0)
		pr_err("Couldn't create cache set -> device symlink");

	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	atomic_dec(&d->c->attached_dev_nr);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned int id)
{
	d->id = id;
	d->c = c;
	c->devices[id] = d;

	if (id >= c->devices_max_used)
		c->devices_max_used = id + 1;

	closure_get(&c->caching);
}

static inline int first_minor_to_idx(int first_minor)
{
	return (first_minor/BCACHE_MINORS);
}

static inline int idx_to_first_minor(int idx)
{
	return (idx * BCACHE_MINORS);
}

static void bcache_device_free(struct bcache_device *d)
{
	struct gendisk *disk = d->disk;

	lockdep_assert_held(&bch_register_lock);

	if (disk)
		pr_info("%s stopped", disk->disk_name);
	else
		pr_err("bcache device (NULL gendisk) stopped");

	if (d->c)
		bcache_device_detach(d);

	if (disk) {
		if (disk->flags & GENHD_FL_UP)
			del_gendisk(disk);

		if (disk->queue)
			blk_cleanup_queue(disk->queue);

		ida_simple_remove(&bcache_device_idx,
				  first_minor_to_idx(disk->first_minor));
		put_disk(disk);
	}

	bioset_exit(&d->bio_split);
	kvfree(d->full_dirty_stripes);
	kvfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

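/*
 * Common setup shared by cached devices and flash-only volumes:
 * allocate the dirty-stripe bookkeeping, grab a minor range from
 * bcache_device_idx, and create the bio_split bioset, gendisk and
 * request queue with bcache's queue limits.
 */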
static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
			      sector_t sectors, make_request_fn make_request_fn)
{
	struct request_queue *q;
	const size_t max_stripes = min_t(size_t, INT_MAX,
					 SIZE_MAX / sizeof(atomic_t));
	size_t n;
	int idx;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;

	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

	if (!d->nr_stripes || d->nr_stripes > max_stripes) {
		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
			(unsigned int)d->nr_stripes);
		return -ENOMEM;
	}

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
	if (!d->full_dirty_stripes)
		return -ENOMEM;

	idx = ida_simple_get(&bcache_device_idx, 0,
				BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
	if (idx < 0)
		return idx;

	if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
		goto err;

	d->disk = alloc_disk(BCACHE_MINORS);
	if (!d->disk)
		goto err;

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= idx_to_first_minor(idx);
	d->disk->fops		= &bcache_ops;
	d->disk->private_data	= d;

	q = blk_alloc_queue(make_request_fn, NUMA_NO_NODE);
	if (!q)
		return -ENOMEM;

	d->disk->queue			= q;
	q->queuedata			= d;
	q->backing_dev_info->congested_data = d;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_PAGES;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	q->limits.discard_granularity	= 512;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);

	blk_queue_write_cache(q, true, true);

	return 0;

err:
	ida_simple_remove(&bcache_device_idx, idx);
	return -ENOMEM;

}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

#define BACKING_DEV_OFFLINE_TIMEOUT 5
static int cached_dev_status_update(void *arg)
{
	struct cached_dev *dc = arg;
	struct request_queue *q;

	/*
	 * If this delayed worker is stopping outside, directly quit here.
	 * dc->io_disable might be set via sysfs interface, so check it
	 * here too.
	 */
	while (!kthread_should_stop() && !dc->io_disable) {
		q = bdev_get_queue(dc->bdev);
		if (blk_queue_dying(q))
			dc->offline_seconds++;
		else
			dc->offline_seconds = 0;

		if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
			pr_err("%s: device offline for %d seconds",
			       dc->backing_dev_name,
			       BACKING_DEV_OFFLINE_TIMEOUT);
			pr_err("%s: disable I/O request due to backing "
			       "device offline", dc->disk.name);
			dc->io_disable = true;
			/* let others know earlier that io_disable is true */
			smp_mb();
			bcache_device_stop(&dc->disk);
			break;
		}
		schedule_timeout_interruptible(HZ);
	}

	wait_for_kthread_stop();
	return 0;
}


int bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;
	char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
	char *env[] = {
		"DRIVER=bcache",
		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
		kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""),
		NULL,
	};

	if (dc->io_disable) {
		pr_err("I/O disabled on cached dev %s",
		       dc->backing_dev_name);
		kfree(env[1]);
		kfree(env[2]);
		kfree(buf);
		return -EIO;
	}

	if (atomic_xchg(&dc->running, 1)) {
		kfree(env[1]);
		kfree(env[2]);
		kfree(buf);
		pr_info("cached dev %s is running already",
		       dc->backing_dev_name);
		return -EBUSY;
	}

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;

		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
	/*
	 * won't show up in the uevent file, use udevadm monitor -e instead
	 * only class / kset properties are persistent
	 */
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
	kfree(env[1]);
	kfree(env[2]);
	kfree(buf);

	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj,
			      &d->kobj, "bcache")) {
		pr_err("Couldn't create bcache dev <-> disk sysfs symlinks");
		return -ENOMEM;
	}

	dc->status_update_thread = kthread_run(cached_dev_status_update,
					       dc, "bcache_status_update");
	if (IS_ERR(dc->status_update_thread)) {
		pr_warn("failed to create bcache_status_update kthread, "
			"continue to run without monitoring backing "
			"device status");
	}

	return 0;
}

/*
 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed
 * work dc->writeback_rate_update is running. Wait until the routine
 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out
 * seconds, give up waiting here and continue to cancel it too.
 */
static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
{
	int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;

	do {
		if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
			      &dc->disk.flags))
			break;
		time_out--;
		schedule_timeout_interruptible(1);
	} while (time_out > 0);

	if (time_out == 0)
		pr_warn("give up waiting for dc->writeback_write_update to quit");

	cancel_delayed_work_sync(&dc->writeback_rate_update);
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	struct closure cl;

	closure_init_stack(&cl);

	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
	BUG_ON(refcount_read(&dc->count));


	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
		cancel_writeback_rate_update_dwork(dc);

	if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
		kthread_stop(dc->writeback_thread);
		dc->writeback_thread = NULL;
	}

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	mutex_lock(&bch_register_lock);

	calc_cached_dev_sectors(dc->disk.c);
	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
	clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", dc->backing_dev_name);

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return;

	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);

	cached_dev_put(dc);
}

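/*
 * Attach a backing device to cache set @c: look up (or allocate) its
 * uuid_entry, update both superblocks, start the writeback machinery
 * and finally run the bcache device. Reached from the register/sysfs
 * paths with bch_register_lock held.
 */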
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid)
{
	uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
	struct uuid_entry *u;
	struct cached_dev *exist_dc, *t;
	int ret = 0;

	if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
	    (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       dc->backing_dev_name);
		return -EINVAL;
	}

	/* Check whether already attached */
	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
			pr_err("Tried to attach %s but duplicate UUID already attached",
				dc->backing_dev_name);

			return -EINVAL;
		}
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set",
			       dc->backing_dev_name);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID",
			       dc->backing_dev_name);
			return -EINVAL;
		}
	}

	/*
	 * Deadlocks since we're called via sysfs...
	 * sysfs_remove_file(&dc->kobj, &sysfs_attach);
	 */

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;

		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	smp_wmb();
	refcount_set(&dc->count, 1);

	/* Block writeback thread, but spawn it */
	down_write(&dc->writeback_lock);
	if (bch_cached_dev_writeback_start(dc)) {
		up_write(&dc->writeback_lock);
		pr_err("Couldn't start writeback facilities for %s",
		       dc->disk.disk->disk_name);
		return -ENOMEM;
	}

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		atomic_set(&dc->has_dirty, 1);
		bch_writeback_queue(dc);
	}

	bch_sectors_dirty_init(&dc->disk);

	ret = bch_cached_dev_run(dc);
	if (ret && (ret != -EBUSY)) {
		up_write(&dc->writeback_lock);
		/*
		 * bch_register_lock is held, bcache_device_stop() is not
		 * able to be directly called. The kthread and kworker
		 * created previously in bch_cached_dev_writeback_start()
		 * have to be stopped manually here.
		 */
		kthread_stop(dc->writeback_thread);
		cancel_writeback_rate_update_dwork(dc);
		pr_err("Couldn't run cached device %s",
		       dc->backing_dev_name);
		return ret;
	}

	bcache_device_link(&dc->disk, c, "bdev");
	atomic_inc(&c->attached_dev_nr);

	/* Allow the writeback thread to proceed */
	up_write(&dc->writeback_lock);

	pr_info("Caching %s as %s on set %pU",
		dc->backing_dev_name,
		dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

/* when dc->disk.kobj released */
void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
		cancel_writeback_rate_update_dwork(dc);

	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		kthread_stop(dc->writeback_thread);
	if (!IS_ERR_OR_NULL(dc->status_update_thread))
		kthread_stop(dc->status_update_thread);

	mutex_lock(&bch_register_lock);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (dc->sb_disk)
		put_page(virt_to_page(dc->sb_disk));

	if (!IS_ERR_OR_NULL(dc->bdev))
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

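/*
 * One-time initialisation of a cached_dev before it is registered:
 * set up its closures, kobject, detach work, I/O accounting and
 * writeback state, then call bcache_device_init() with the backing
 * device's size minus the data offset.
 */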
static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	sema_init(&dc->sb_write_mutex, 1);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_cutoff		= 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	dc->disk.stripe_size = q->limits.io_opt >> 9;

	if (dc->disk.stripe_size)
		dc->partial_stripes_expensive =
			q->limits.raid_partial_stripes_expensive;

	ret = bcache_device_init(&dc->disk, block_size,
			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
			 cached_dev_make_request);
	if (ret)
		return ret;

	dc->disk.disk->queue->backing_dev_info->ra_pages =
		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
		    q->backing_dev_info->ra_pages);

	atomic_set(&dc->io_errors, 0);
	dc->io_disable = false;
	dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
	/* default to auto */
	dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */

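/*
 * Register a backing device: take ownership of @sb_disk and @bdev,
 * initialise the cached_dev, hook it up in sysfs, try to attach it to
 * any already-registered cache set and, if the superblock state is
 * NONE or STALE, start the bcache device immediately.
 */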
static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
				 struct block_device *bdev,
				 struct cached_dev *dc)
{
	const char *err = "cannot allocate memory";
	struct cache_set *c;
	int ret = -ENOMEM;

	bdevname(bdev, dc->backing_dev_name);
	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;
	dc->sb_disk = sb_disk;

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", dc->backing_dev_name);

	list_add(&dc->list, &uncached_devices);
	/* attach to a matched cache set if it exists */
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c, NULL);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
		err = "failed to run cached device";
		ret = bch_cached_dev_run(dc);
		if (ret)
			goto err;
	}

	return 0;
err:
	pr_notice("error %s: %s", dc->backing_dev_name, err);
	bcache_device_stop(&dc->disk);
	return ret;
}

/* Flash only volumes */

/* When d->kobj released */
void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	mutex_lock(&bch_register_lock);
	atomic_long_sub(bcache_dev_sectors_dirty(d),
			&d->c->flash_dev_dirty_sectors);
	bcache_device_free(d);
	mutex_unlock(&bch_register_lock);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c), u->sectors,
			flash_dev_make_request))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	bch_sectors_dirty_init(d);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
		return -EPERM;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}

bool bch_cached_dev_error(struct cached_dev *dc)
{
	if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return false;

	dc->io_disable = true;
	/* make others know io_disable is true earlier */
	smp_mb();

	pr_err("stop %s: too many IO errors on backing device %s\n",
		dc->disk.disk->disk_name, dc->backing_dev_name);

	bcache_device_stop(&dc->disk);
	return true;
}

/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (c->on_error != ON_ERROR_PANIC &&
	    test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
		pr_info("CACHE_SET_IO_DISABLE already set");

	/*
	 * XXX: we can be called from atomic context
	 * acquire_console_sem();
	 */

	pr_err("bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	pr_err(", disabling caching\n");

	if (c->on_error == ON_ERROR_PANIC)
		panic("panic forced after error\n");

	bch_cache_set_unregister(c);
	return true;
}

/* When c->kobj released */
void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned int i;

	debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	mutex_lock(&bch_register_lock);
	for_each_cache(ca, c, i)
		if (ca) {
			ca->set = NULL;
			c->cache[ca->sb.nr_this_dev] = NULL;
			kobject_put(&ca->kobj);
		}

	bch_bset_sort_state_free(&c->sort);
	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));

	if (c->moving_gc_wq)
		destroy_workqueue(c->moving_gc_wq);
	bioset_exit(&c->bio_split);
	mempool_exit(&c->fill_iter);
	mempool_exit(&c->bio_meta);
	mempool_exit(&c->search);
	kfree(c->devices);

	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

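/*
 * Tear-down path run from the cache set's caching closure: stop the GC
 * and allocator threads, write out any dirty btree nodes (unless the
 * set is being retired because of too many I/O errors) and flush the
 * last journal entry before cache_set_free() runs.
 */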
static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cache *ca;
	struct btree *b;
	unsigned int i;

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (!IS_ERR_OR_NULL(c->gc_thread))
		kthread_stop(c->gc_thread);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/*
	 * Avoid flushing cached nodes if cache set is retiring
	 * due to too many I/O errors detected.
	 */
	if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
		list_for_each_entry(b, &c->btree_cache, list) {
			mutex_lock(&b->write_lock);
			if (btree_node_dirty(b))
				__bch_btree_node_write(b, NULL);
			mutex_unlock(&b->write_lock);
		}

	for_each_cache(ca, c, i)
		if (ca->alloc_thread)
			kthread_stop(ca->alloc_thread);

	if (c->journal.cur) {
		cancel_delayed_work_sync(&c->journal.work);
		/* flush last journal entry if needed */
		c->journal.work.work.func(&c->journal.work.work);
	}

	closure_return(cl);
}

/*
 * This function is only called when CACHE_SET_IO_DISABLE is set, which means
 * cache set is unregistering due to too many I/O errors. In this condition,
 * the bcache device might be stopped, it depends on stop_when_cache_set_failed
 * value and whether the broken cache has dirty data:
 *
 * dc->stop_when_cache_set_failed    dc->has_dirty   stop bcache device
 *  BCH_CACHED_STOP_AUTO               0               NO
 *  BCH_CACHED_STOP_AUTO               1               YES
 *  BCH_CACHED_DEV_STOP_ALWAYS         0               YES
 *  BCH_CACHED_DEV_STOP_ALWAYS         1               YES
 *
 * The expected behavior is, if stop_when_cache_set_failed is configured to
 * "auto" via sysfs interface, the bcache device will not be stopped if the
 * backing device is clean on the broken cache device.
 */
static void conditional_stop_bcache_device(struct cache_set *c,
					   struct bcache_device *d,
					   struct cached_dev *dc)
{
	if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
		pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.",
			d->disk->disk_name, c->sb.set_uuid);
		bcache_device_stop(d);
	} else if (atomic_read(&dc->has_dirty)) {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
		 * and dc->has_dirty == 1
		 */
		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
			d->disk->disk_name);
		/*
		 * There might be a small time gap that cache set is
		 * released but bcache device is not. Inside this time
		 * gap, regular I/O requests will directly go into
		 * backing device as no cache set attached to. This
		 * behavior may also introduce potential inconsistence
		 * data in writeback mode while cache is dirty.
		 * Therefore before calling bcache_device_stop() due
		 * to a broken cache device, dc->io_disable should be
		 * explicitly set to true.
		 */
		dc->io_disable = true;
		/* make others know io_disable is true earlier */
		smp_mb();
		bcache_device_stop(d);
	} else {
		/*
		 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
		 * and dc->has_dirty == 0
		 */
		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.",
			d->disk->disk_name);
	}
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc;
	struct bcache_device *d;
	size_t i;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++) {
		d = c->devices[i];
		if (!d)
			continue;

		if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
		    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
			dc = container_of(d, struct cached_dev, disk);
			bch_cached_dev_detach(dc);
			if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
				conditional_stop_bcache_device(c, d, dc);
		} else {
			bcache_device_stop(d);
		}
	}

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		/* closure_fn set to __cache_set_unregister() */
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)			\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);

	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size	= sb->block_size;
	c->sb.bucket_size	= sb->bucket_size;
	c->sb.nr_in_set		= sb->nr_in_set;
	c->sb.last_mount	= sb->last_mount;
	c->bucket_bits		= ilog2(sb->bucket_size);
	c->block_bits		= ilog2(sb->block_size);
	c->nr_uuids		= bucket_bytes(c) / sizeof(struct uuid_entry);
	c->devices_max_used	= 0;
	atomic_set(&c->attached_dev_nr, 0);
	c->btree_pages		= bucket_pages(c);
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->btree_cache_wait);
	spin_lock_init(&c->btree_cannibalize_lock);
	init_waitqueue_head(&c->bucket_wait);
	init_waitqueue_head(&c->gc_wait);
	sema_init(&c->uuid_write_mutex, 1);

	spin_lock_init(&c->btree_gc_time.lock);
	spin_lock_init(&c->btree_split_time.lock);
	spin_lock_init(&c->btree_read_time.lock);

	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) ||
	    mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
	    mempool_init_kmalloc_pool(&c->bio_meta, 2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c)) ||
	    mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
	    bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
						WQ_MEM_RECLAIM, 0)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c) ||
	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
		goto err;

	c->congested_read_threshold_us	= 2000;
	c->congested_write_threshold_us	= 20000;
	c->error_limit	= DEFAULT_IO_ERROR_LIMIT;
	c->idle_max_writeback_rate_enabled = 1;
	WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}

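/*
 * Bring a cache set online: recover or initialize its on-disk metadata,
 * start the allocator and gc threads, then attach any waiting backing
 * devices and flash-only volumes.
 */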
static int run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	struct closure cl;
	unsigned int i;
	LIST_HEAD(journal);
	struct journal_replay *l;

	closure_init_stack(&cl);

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;
	set_gc_sectors(c);

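	/*
	 * CACHE_SYNC set: the on-disk metadata is valid, so recover the
	 * btree root and replay the journal; otherwise the cache is
	 * treated as brand new and its existing data is invalidated.
	 */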
	if (CACHE_SYNC(&c->sb)) {
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i) {
			if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
				goto err;
		}

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_btree_ptr_invalid(c, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, NULL, k,
					     j->btree_level,
					     true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c))
			goto err;

		bch_journal_mark(c, &journal);
		bch_initial_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		err = "bcache: replay journal failed";
		if (bch_journal_replay(c, &journal))
			goto err;
	} else {
		pr_notice("invalidating existing data");

		for_each_cache(ca, c, i) {
			unsigned int j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_initial_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca, true);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		mutex_lock(&c->root->write_lock);
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->sb.last_mount = (u32)ktime_get_real_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c, NULL);

	flash_devs_run(c);

	set_bit(CACHE_SET_RUNNING, &c->flags);
	return 0;
err:
	while (!list_empty(&journal)) {
		l = list_first_entry(&journal, struct journal_replay, list);
		list_del(&l->list);
		kfree(l);
	}

	closure_sync(&cl);

	bch_cache_set_error(c, "%s", err);

	return -EIO;
}

static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size	== c->sb.block_size &&
		ca->sb.bucket_size	== c->sb.bucket_size &&
		ca->sb.nr_in_set	== c->sb.nr_in_set;
}

static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

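	/*
	 * If this cache member carries a newer superblock, refresh the
	 * cache set's in-memory copy from it.
	 */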
	if (ca->sb.seq > c->sb.seq) {
		c->sb.version		= ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags             = ca->sb.flags;
		c->sb.seq		= ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set) {
		err = "failed to run cache set";
		if (run_cache_set(c) < 0)
			goto err;
	}

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

/* Called when ca->kobj is released */
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned int i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_disk)
		put_page(virt_to_page(ca->sb_disk));

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}

static int cache_alloc(struct cache *ca)
{
	size_t free;
	size_t btree_buckets;
	struct bucket *b;
	int ret = -ENOMEM;
	const char *err = NULL;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	/*
	 * When ca->sb.njournal_buckets is not zero a journal exists, and
	 * btree nodes may split during bch_journal_replay(), so buckets of
	 * type RESERVE_BTREE are needed. In the worst case every journal
	 * bucket holds valid journal entries and all of the keys need to
	 * be replayed, so reserve as many RESERVE_BTREE buckets as there
	 * are journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
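	/* ~1/1024 of the buckets; sizes the reserve fifos and heap below */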
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
	if (!free) {
		ret = -EPERM;
		err = "ca->sb.nbuckets is too small";
		goto err_free;
	}

	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
						GFP_KERNEL)) {
		err = "ca->free[RESERVE_BTREE] alloc failed";
		goto err_btree_alloc;
	}

	if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
							GFP_KERNEL)) {
		err = "ca->free[RESERVE_PRIO] alloc failed";
		goto err_prio_alloc;
	}

	if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
		err = "ca->free[RESERVE_MOVINGGC] alloc failed";
		goto err_movinggc_alloc;
	}

	if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
		err = "ca->free[RESERVE_NONE] alloc failed";
		goto err_none_alloc;
	}

	if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
		err = "ca->free_inc alloc failed";
		goto err_free_inc_alloc;
	}

	if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
		err = "ca->heap alloc failed";
		goto err_heap_alloc;
	}

	ca->buckets = vzalloc(array_size(sizeof(struct bucket),
			      ca->sb.nbuckets));
	if (!ca->buckets) {
		err = "ca->buckets alloc failed";
		goto err_buckets_alloc;
	}

	ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
				   prio_buckets(ca), 2),
				   GFP_KERNEL);
	if (!ca->prio_buckets) {
		err = "ca->prio_buckets alloc failed";
		goto err_prio_buckets_alloc;
	}

	ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca);
	if (!ca->disk_buckets) {
		err = "ca->disk_buckets alloc failed";
		goto err_disk_buckets_alloc;
	}

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);
	return 0;

err_disk_buckets_alloc:
	kfree(ca->prio_buckets);
err_prio_buckets_alloc:
	vfree(ca->buckets);
err_buckets_alloc:
	free_heap(&ca->heap);
err_heap_alloc:
	free_fifo(&ca->free_inc);
err_free_inc_alloc:
	free_fifo(&ca->free[RESERVE_NONE]);
err_none_alloc:
	free_fifo(&ca->free[RESERVE_MOVINGGC]);
err_movinggc_alloc:
	free_fifo(&ca->free[RESERVE_PRIO]);
err_prio_alloc:
	free_fifo(&ca->free[RESERVE_BTREE]);
err_btree_alloc:
err_free:
	module_put(THIS_MODULE);
	if (err)
		pr_notice("error %s: %s", ca->cache_dev_name, err);
	return ret;
}

static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
				struct block_device *bdev, struct cache *ca)
{
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, ca->cache_dev_name);
	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;
	ca->sb_disk = sb_disk;

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		/*
		 * If we failed here, it means ca->kobj is not initialized yet,
		 * kobject_put() won't be called and there is no chance to
		 * call blkdev_put() to bdev in bch_cache_release(). So we
		 * explicitly call blkdev_put() here.
		 */
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else if (ret == -EPERM)
			err = "cache_alloc(): cache device is too small";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj,
			&part_to_dev(bdev->bd_part)->kobj,
			"bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", ca->cache_dev_name);

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error %s: %s", ca->cache_dev_name, err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size);
static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer, size_t size);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);
kobj_attribute_write(pendings_cleanup,	bch_pending_bdevs_cleanup);

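/* Return true if bdev is already registered as a bcache backing device */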
static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

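/* Return true if bdev is already registered as a cache device */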
static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned int i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	const char *err;
	char *path = NULL;
	struct cache_sb *sb;
	struct cache_sb_disk *sb_disk;
	struct block_device *bdev;
	ssize_t ret;

	ret = -EBUSY;
	err = "failed to reference bcache module";
	if (!try_module_get(THIS_MODULE))
		goto out;

	/* For latest state of bcache_is_reboot */
	smp_mb();
	err = "bcache is in reboot";
	if (bcache_is_reboot)
		goto out_module_put;

	ret = -ENOMEM;
	err = "cannot allocate memory";
	path = kstrndup(buffer, size, GFP_KERNEL);
	if (!path)
		goto out_module_put;

	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
	if (!sb)
		goto out_free_path;

	ret = -EINVAL;
	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (!IS_ERR(bdev))
				bdput(bdev);
			if (attr == &ksysfs_register_quiet)
				goto done;
		}
		goto out_free_sb;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto out_blkdev_put;

	err = read_super(sb, bdev, &sb_disk);
	if (err)
		goto out_blkdev_put;

	err = "failed to register device";
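	/* A superblock with SB_IS_BDEV set describes a backing device */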
	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		if (!dc)
			goto out_put_sb_page;

		mutex_lock(&bch_register_lock);
		ret = register_bdev(sb, sb_disk, bdev, dc);
		mutex_unlock(&bch_register_lock);
		/* blkdev_put() will be called in cached_dev_free() */
		if (ret < 0)
			goto out_free_sb;
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

		if (!ca)
			goto out_put_sb_page;

		/* blkdev_put() will be called in bch_cache_release() */
		if (register_cache(sb, sb_disk, bdev, ca) != 0)
			goto out_free_sb;
	}

done:
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return size;

out_put_sb_page:
	put_page(virt_to_page(sb_disk));
out_blkdev_put:
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
out_free_sb:
	kfree(sb);
out_free_path:
	kfree(path);
	path = NULL;
out_module_put:
	module_put(THIS_MODULE);
out:
	pr_info("error %s: %s", path?path:"", err);
	return ret;
}


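/* Helper to collect pending (uncached) backing devices for pendings_cleanup */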
struct pdev {
	struct list_head list;
	struct cached_dev *dc;
};

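/*
 * Writing to the pendings_cleanup sysfs file stops backing devices that are
 * still waiting for a cache set which never got registered.
 */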
static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer,
					 size_t size)
{
	LIST_HEAD(pending_devs);
	ssize_t ret = size;
	struct cached_dev *dc, *tdc;
	struct pdev *pdev, *tpdev;
	struct cache_set *c, *tc;

	mutex_lock(&bch_register_lock);
	list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
		pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
		if (!pdev)
			break;
		pdev->dc = dc;
		list_add(&pdev->list, &pending_devs);
	}

	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
		list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
			char *pdev_set_uuid = pdev->dc->sb.set_uuid;
			char *set_uuid = c->sb.uuid;

			if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
				list_del(&pdev->list);
				kfree(pdev);
				break;
			}
		}
	}
	mutex_unlock(&bch_register_lock);

	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
		pr_info("delete pdev %p", pdev);
		list_del(&pdev->list);
		bcache_device_stop(&pdev->dc->disk);
		kfree(pdev);
	}

	return ret;
}

static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (bcache_is_reboot)
		return NOTIFY_DONE;

	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (bcache_is_reboot)
			goto out;

		/* New registrations are rejected from now on */
		bcache_is_reboot = true;
		/*
		 * Make a registering caller (if any) on another CPU core
		 * see bcache_is_reboot set to true as early as possible
		 */
		smp_mb();

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		mutex_unlock(&bch_register_lock);

		pr_info("Stopping all devices:");

		/*
		 * The reason bch_register_lock is not held while calling
		 * bch_cache_set_stop() and bcache_device_stop() is to
		 * avoid a potential deadlock during reboot, because the
		 * cache set and bcache device stopping paths acquire
		 * bch_register_lock too.
		 *
		 * We are safe here because bcache_is_reboot is already set
		 * to true, so register_bcache() rejects new registrations
		 * from now on. bcache_is_reboot also makes sure
		 * bcache_reboot() won't be re-entered by another thread,
		 * so there is no race in the following list iteration by
		 * list_for_each_entry_safe().
		 */
		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);


		/*
		 * Give an early chance for other kthreads and
		 * kworkers to stop themselves
		 */
		schedule();

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 10 * HZ - jiffies;

			mutex_lock(&bch_register_lock);
			stopped = list_empty(&bch_cache_sets) &&
				list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bch_journal_wq)
		destroy_workqueue(bch_journal_wq);

	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}

/* Check and fixup module parameters */
static void check_module_parameters(void)
{
	if (bch_cutoff_writeback_sync == 0)
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
	else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
		pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u",
			bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
	}

	if (bch_cutoff_writeback == 0)
		bch_cutoff_writeback = CUTOFF_WRITEBACK;
	else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
		pr_warn("set bch_cutoff_writeback (%u) to max value %u",
			bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
		bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
	}

	if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
		pr_warn("set bch_cutoff_writeback (%u) to %u",
			bch_cutoff_writeback, bch_cutoff_writeback_sync);
		bch_cutoff_writeback = bch_cutoff_writeback_sync;
	}
}

static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		&ksysfs_pendings_cleanup.attr,
		NULL
	};

	check_module_parameters();

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
	if (!bcache_wq)
		goto err;

	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
	if (!bch_journal_wq)
		goto err;

	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
	if (!bcache_kobj)
		goto err;

	if (bch_request_init() ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	bch_debug_init();
	closure_debug_init();

	bcache_is_reboot = false;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

/*
 * Module hooks
 */
module_exit(bcache_exit);
module_init(bcache_init);

module_param(bch_cutoff_writeback, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");

module_param(bch_cutoff_writeback_sync, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");

MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_LICENSE("GPL");