/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>

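/*
 * Cutoffs, in percent of cache in use: past CUTOFF_CACHE_ADD new data isn't
 * added to the cache, and past CUTOFF_CACHE_READA no readahead is done (see
 * check_should_bypass() and cached_dev_cache_miss()).
 */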
#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;
	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);
	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}

static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}

static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
					 struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					   struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}

static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};

static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

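	/* Stash the checksum in the spare u64 slot after the key's last pointer */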
	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

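/*
 * Journal the accumulated keys (skipped for replace/cache-promote inserts,
 * whose data can always be re-read from the backing device) and then insert
 * them into the btree.
 */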
static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, bcache_wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

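/*
 * Invalidate a region instead of caching it: insert keys with no pointers,
 * which overwrite (and so invalidate) whatever was cached there. Note that a
 * bkey's offset is the end of the range it covers, which is why bi_sector is
 * advanced before the key is added.
 */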
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
			goto out;

		bio->bi_sector	+= sectors;
		bio->bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, bcache_wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

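/*
 * Completion for writes to the cache: on error, a writeback write has to
 * fail outright (the cache would hold the only copy), a writethrough write
 * falls back to invalidating the region via bch_data_insert_error(), and a
 * replace (cache promote) is simply abandoned.
 */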
static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}

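/*
 * Allocate sectors in the cache and issue the data writes, building one key
 * per allocation; the keys are inserted afterwards by bch_data_insert_keys().
 * If op->bypass is set, the region is invalidated instead.
 */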
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, bcache_wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if op->writeback is true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, bcache_wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->bio, op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */

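/*
 * Returns 0 if the cache set isn't congested; otherwise a sector count used
 * as a bypass threshold, shrinking as congestion grows. c->congested decays
 * with the time elapsed since congestion was last recorded, and subtracting
 * bitmap_weight() of a random word adds jitter so decisions near the
 * threshold aren't deterministic.
 */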
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;

	if (atomic_read(&dc->disk.detaching) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	if (dc->sequential_merge) {
		struct io *i;

		spin_lock(&dc->io_lock);

		hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
			if (i->last == bio->bi_sector &&
			    time_before(jiffies, i->jiffies))
				goto found;

		i = list_first_entry(&dc->io_lru, struct io, lru);

		add_sequential(task);
		i->sequential = 0;
found:
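		/* i->sequential is unsigned: guard against overflow wrapping it */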
		if (i->sequential + bio->bi_size > i->sequential)
			i->sequential	+= bio->bi_size;

		i->last			 = bio_end_sector(bio);
		i->jiffies		 = jiffies + msecs_to_jiffies(5000);
		task->sequential_io	 = i->sequential;

		hlist_del(&i->hash);
		hlist_add_head(&i->hash, iohash(dc, i->last));
		list_move_tail(&i->lru, &dc->io_lru);

		spin_unlock(&dc->io_lock);
	} else {
		task->sequential_io = bio->bi_size;

		add_sequential(task);
	}

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bcache_device	*d;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;

	unsigned		insert_bio_sectors;

	unsigned		recoverable:1;
	unsigned		unaligned_bvec:1;
	unsigned		write:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but not error so it doesn't
	 * get counted against the cache device, but we'll still reread the
	 * data from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
				     KEY_OFFSET(k) - bio->bi_sector),
			  GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;

	int ret = bch_btree_map_keys(&s->op, s->iop.c,
				     &KEY(s->iop.inode, bio->bi_sector, 0),
				     cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct search *s;
	struct bio_vec *bv;

	s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, iop.insert_keys));

	__closure_init(&s->cl, NULL);

	s->iop.inode		= d->id;
	s->iop.c		= d->c;
	s->d			= d;
	s->op.lock		= -1;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->orig_bio		= bio;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->recoverable		= 1;
	s->start_time		= jiffies;
	do_bio_hook(s);

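	/*
	 * If the bio's segments aren't simple full pages, keep a private copy
	 * of its bio_vec so the retry path (cached_dev_read_error()) can
	 * rebuild s->bio without clobbering the original bio's vector.
	 */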
	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec	= bv;
		s->unaligned_bvec	= 1;
	}

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_sector = s->cache_miss->bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->unaligned_bvec)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

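/*
 * Handle the portion of a read that missed the cache: optionally extend it
 * with readahead, reserve the range with a check key (so a racing write shows
 * up as a replace collision), then read the whole range from the backing
 * device into a bounce bio that is later copied back to the original bio and
 * inserted into the cache.
 */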
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_sector	= miss->bi_sector;
	cache_bio->bi_bdev	= miss->bi_bdev;
	cache_bio->bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

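/*
 * Decide how a write is handled: bypass (backing device only), writeback
 * (cache only, flushed to the backing device later) or writethrough (both in
 * parallel); in all cases bch_data_insert() then runs to insert keys for the
 * range (or invalidate it, when bypassing).
 */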
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
					      dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec *bv;
	int i;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		sectors	-= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_size));

	if (!bio->bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

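/*
 * Flash-only volumes have no backing device: writes go straight into the
 * cache set as writeback data (discards just invalidate), and reads are
 * served from the btree with any holes zero-filled by flash_dev_cache_miss().
 */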
static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;


	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif
	return 0;
}