/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;
	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);
	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}

static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}

static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
					 struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					   struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}

static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};

static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}

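/*
 * Checksum the bio's data with a 64 bit crc and stash the result in the
 * key's first unused pointer slot; the value is truncated to 63 bits
 * (csum & (~0ULL >> 1)).
 */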
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, btree);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = s->replace ? &s->replace_key : NULL;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (s->write)
		journal_ref = bch_journal(s->c, &s->insert_keys,
					  s->flush_journal
					  ? &s->cl : NULL);

	if (bch_btree_insert(&s->op, s->c, &s->insert_keys,
			     journal_ref, replace_key)) {
		s->error		= -ENOMEM;
		s->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!s->insert_data_done)
		continue_at(cl, bch_data_insert_start, bcache_wq);

	bch_keylist_free(&s->insert_keys);
	closure_return(cl);
}

struct open_bucket {
	struct list_head	list;
	struct task_struct	*last;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache
 * at the same time, you'll get better cache utilization if you try to
 * segregate their data and preserve locality.
 *
 * For example, say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache a while, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
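 *
 * Concretely, pick_data_bucket() below scans the open buckets from most to
 * least recently used: an exact match on the search key (a sequential
 * append) wins outright, a bucket last used by the same task is the
 * fallback, and otherwise the coldest open bucket gets recycled.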
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    struct task_struct *task,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last == task)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
			      struct search *s)
{
	struct cache_set *c = s->c;
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, s->task, &alloc.key))) {
		unsigned watermark = s->write_prio
			? WATERMARK_MOVINGGC
			: WATERMARK_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key,
					 1, s->writeback))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		__bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last = s->task;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * the refcount taken when the bucket was allocated.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

static void bch_data_invalidate(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, btree);
	struct bio *bio = s->cache_bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned len = min(bio_sectors(bio), 1U << 14);

		if (bch_keylist_realloc(&s->insert_keys, 0, s->c))
			goto out;

		bio->bi_sector	+= len;
		bio->bi_size	-= len << 9;

		bch_keylist_add(&s->insert_keys,
				&KEY(s->inode, bio->bi_sector, len));
	}

	s->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, bcache_wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, btree);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = s->insert_keys.keys, *dst = s->insert_keys.keys;

	while (src != s->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	s->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, btree);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (s->writeback)
			s->error = error;
		else if (s->write)
			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(s->c, bio, error, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, btree);
	struct bio *bio = s->cache_bio, *n;

	if (s->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &s->c->sectors_to_gc) < 0) {
		set_gc_sectors(s->c);
		wake_up_gc(s->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = s->d
			? s->d->bio_split : s->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&s->insert_keys,
					1 + (s->csum ? 1 : 0),
					s->c))
			continue_at(cl, bch_data_insert_keys, bcache_wq);

		k = s->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, s->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(k, bio_sectors(bio), s))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (s->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(s->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, s->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&s->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, s->c, k, 0);
	} while (n != bio);

	s->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(s->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (s->write) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		s->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		s->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&s->insert_keys))
			continue_at(cl, bch_data_insert_keys, bcache_wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
 * and s->inode is used for the key inode.
 *
 * If s->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by s->cache_bio and s->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, btree);

	bch_keylist_init(&s->insert_keys);
	bio_get(s->cache_bio);
	bch_data_insert_start(cl);
}

/* Cache lookup */

static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->error but not error so it doesn't get
	 * counted against the cache device, but we'll still reread the data
	 * from the backing device.
	 */

	if (error)
		s->error = error;
	else if (ptr_stale(s->c, &b->key, 0)) {
		atomic_long_inc(&s->c->cache_read_races);
		s->error = -EINTR;
	}

	bch_bbio_endio(s->c, bio, error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->inode, bio->bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->inode ||
	    KEY_START(k) > bio->bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
				     KEY_OFFSET(k) - bio->bi_sector),
			  GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->inode, n->bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;

static void cache_lookup(struct closure *cl)
{
K
Kent Overstreet 已提交
730
	struct search *s = container_of(cl, struct search, btree);
731 732
	struct bio *bio = &s->bio.bio;

K
Kent Overstreet 已提交
733
	int ret = bch_btree_map_keys(&s->op, s->c,
K
Kent Overstreet 已提交
734
				     &KEY(s->inode, bio->bi_sector, 0),
K
Kent Overstreet 已提交
735
				     cache_lookup_fn, MAP_END_KEY);
736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s, s->orig_bio);
		bio_endio(s->orig_bio, s->error);
		s->orig_bio = NULL;
	}
}

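/*
 * Set up the search's embedded bio as a copy of the original bio, with our
 * request_endio and the search closure as its completion context, so it can
 * be split and resubmitted without disturbing the original.
 */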
static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->cache_bio)
		bio_put(s->cache_bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct search *s;
	struct bio_vec *bv;

	s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, insert_keys));

	__closure_init(&s->cl, NULL);

	s->inode		= d->id;
	s->c			= d->c;
	s->d			= d;
	s->op.lock		= -1;
	s->task			= current;
	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

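/*
 * Returns 0 if the cache isn't currently congested; otherwise a small
 * positive sector count that check_should_bypass() uses as a cutoff -
 * requests at least that large go straight to the backing device. The
 * heavier the recent congestion, the smaller the cutoff, and a little
 * randomness is subtracted to dither the threshold.
 */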
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

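/*
 * Fold the task's just-finished sequential_io byte count into its moving
 * average (an EWMA weighting each new sample at roughly 1/8) and reset the
 * counter for the next stream.
 */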
static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

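/*
 * Decide whether this request should skip the cache entirely: discards,
 * writes in writearound mode, unaligned I/O, long sequential streams and
 * requests arriving while the cache is congested or nearly full all go
 * straight to the backing device.
 */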
static bool check_should_bypass(struct cached_dev *dc, struct search *s)
{
	struct cache_set *c = s->c;
	struct bio *bio = &s->bio.bio;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);

	if (atomic_read(&dc->disk.detaching) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	if (dc->sequential_merge) {
		struct io *i;

		spin_lock(&dc->io_lock);

		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
			if (i->last == bio->bi_sector &&
			    time_before(jiffies, i->jiffies))
				goto found;

		i = list_first_entry(&dc->io_lru, struct io, lru);

		add_sequential(s->task);
		i->sequential = 0;
found:
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

		i->last			 = bio_end_sector(bio);
		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
		s->task->sequential_io	 = i->sequential;

		hlist_del(&i->hash);
		hlist_add_head(&i->hash, iohash(dc, i->last));
		list_move_tail(&i->lru, &dc->io_lru);

		spin_unlock(&dc->io_lock);
	} else {
		s->task->sequential_io = bio->bi_size;

		add_sequential(s->task);
	}

	sectors = max(s->task->sequential_io,
		      s->task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(s->orig_bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(s->orig_bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(s, bio_sectors(bio));
	return true;
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->op.insert_collision)
		bch_mark_cache_miss_collision(s);

	if (s->cache_bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->cache_bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->cache_bio) {
		bio_reset(s->cache_bio);
		s->cache_bio->bi_sector =
			s->cache_miss->bi_sector;
		s->cache_bio->bi_bdev = s->cache_miss->bi_bdev;
		s->cache_bio->bi_size = s->cache_bio_sectors << 9;
		bch_bio_map(s->cache_bio, NULL);

		bio_copy_data(s->cache_miss, s->cache_bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable)
		bch_data_verify(s);

	bio_complete(s);

	if (s->cache_bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
		BUG_ON(!s->replace);
		closure_call(&s->btree, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s, !s->cache_miss, s->bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->bypass);

	if (s->error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->cache_bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->bypass) {
		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->cache_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->replace_key = KEY(s->inode, bio->bi_sector +
			     s->cache_bio_sectors, s->cache_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->replace_key);
	if (ret)
		return ret;

	s->replace = true;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->cache_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_sector	= miss->bi_sector;
	cache_bio->bi_bdev	= miss->bi_bdev;
	cache_bio->bi_size	= s->cache_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	s->cache_miss	= miss;
	s->cache_bio = cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->btree, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->bypass	= false;
		s->writeback	= true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->bypass)) {
		s->bypass = false;
		s->writeback = true;
	}

	trace_bcache_write(s->orig_bio, s->writeback, s->bypass);

	if (s->bypass) {
		s->cache_bio = s->orig_bio;
		bio_get(s->cache_bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->writeback) {
		bch_writeback_add(dc);
		s->cache_bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->cache_bio = bio_clone_bioset(bio, GFP_NOIO,
						dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->btree, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->flush_journal)
		bch_journal_meta(s->c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s, bio);

		if (!bio->bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->bypass = check_should_bypass(dc, s);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

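/*
 * A flash only volume has no backing device, so a cache miss is just a hole
 * in the keyspace: the corresponding part of the bio is zero filled rather
 * than read from anywhere.
 */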
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec *bv;
	int i;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		sectors	-= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_size));

	if (!bio->bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->flush_journal)
		bch_journal_meta(s->c, cl);

	continue_at(cl, search_free, NULL);
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s, bio);

	if (!bio->bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->c->moving_gc_keys,
					&KEY(d->id, bio->bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->bypass	= (bio->bi_rw & REQ_DISCARD) != 0;
		s->writeback	= true;
		s->cache_bio	= bio;

		closure_call(&s->btree, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->btree, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif
	return 0;
}