// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128

/* Bucket heap / gen */
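
/*
 * Note on gens: bumping b->gen invalidates every btree pointer that still
 * carries the old gen. bucket_gc_gen(b) is, roughly, how far the gen has
 * advanced since garbage collection last saw this bucket; need_gc tracks the
 * worst case across the whole set so gc can be forced before any bucket's
 * gen wraps (BUCKET_GC_GEN_MAX).
 */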

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}
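
/*
 * Priorities decay as IO happens: c->rescale counts down the sectors
 * written, and each time roughly nbuckets * bucket_size / 1024 sectors go
 * by, every unpinned non-btree bucket with a nonzero prio is aged by one.
 * Recently written buckets thus keep a higher bucket_prio() and survive
 * the LRU invalidation pass longer.
 */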

void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale the
 * bucket prios appropriately.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})
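
/*
 * Rough worked example, assuming INITIAL_PRIO is 32768 (its value in
 * bcache.h) and ca->set->min_prio is 0: the scale term is 32768 / 8 = 4096,
 * so a freshly written bucket weighs (32768 + 4096) * GC_SECTORS_USED(b)
 * while the coldest bucket weighs only 4096 * GC_SECTORS_USED(b) - the
 * 1/8th of INITIAL_PRIO described above.
 */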

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

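/*
 * invalidate_buckets_lru() below first fills a bounded heap with the
 * lowest-bucket_prio() candidates (bucket_max_cmp keeps the priciest kept
 * candidate at the root so it can be evicted by a better one), then
 * re-sifts with bucket_min_cmp so heap_pop() hands buckets back
 * cheapest-first.
 */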
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}
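
/*
 * The fifo and random policies below share a pattern: keep scanning until
 * free_inc is full, and if a whole pass (half the buckets, for random)
 * finds nothing invalidatable, set invalidate_needs_gc and wake the gc
 * thread instead of spinning.
 */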

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;
		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}
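
/*
 * allocator_wait() is an open-coded wait_event(): it has to drop
 * bucket_lock around schedule(), and it returns from the allocator thread
 * entirely (rather than just breaking out of the loop) if the kthread is
 * being stopped.
 */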

#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop()) {				\
			set_current_state(TASK_RUNNING);		\
			return 0;					\
		}							\
									\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
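
/*
 * Push a freshly invalidated bucket onto one of the freelists: prios/gens
 * get first claim on space, then the remaining reserves in order. Returns
 * false only when every reserve fifo is full, in which case the allocator
 * thread waits in allocator_wait().
 */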

static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (!fifo_empty(&ca->free_inc)) {
			long bucket;

			fifo_pop(&ca->free_inc, bucket);

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */
retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			bch_prio_write(ca);
		}
	}
}

/* Allocation */

long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

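	/*
	 * No bucket was immediately available: sleep on bucket_wait until
	 * the allocator thread has refilled a freelist we're allowed to
	 * use.
	 */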
	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
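		/*
		 * Paranoia: the bucket we're about to hand out must not
		 * still be in the prio_buckets array, on any reserve
		 * freelist, or on free_inc.
		 */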
		size_t iter;
		long i;
		unsigned j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

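	/*
	 * Buckets handed out for prios or btree nodes are marked as
	 * metadata, which keeps bch_can_invalidate_bucket() from ever
	 * reclaiming them behind our back; ordinary data buckets start at
	 * INITIAL_PRIO and are aged by bch_rescale_priorities().
	 */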
	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	if (ca->set->avail_nbuckets > 0) {
		ca->set->avail_nbuckets--;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}

	return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);

	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
		ca->set->avail_nbuckets++;
		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
	}
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
				bucket_to_sector(c, b),
				ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;
	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}
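
/*
 * Hypothetical usage sketch (no caller in this file does exactly this; the
 * reserve choice and error handling are illustrative only):
 *
 *	BKEY_PADDED(key) k;
 *
 *	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
 *		return -ENOSPC;
 *	// k.key now points at a pinned bucket; insert it into the btree
 *	// or release it with bkey_put(c, &k.key).
 */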

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned		last_write_point;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable: if
 * they share a bucket with dirty sectors from a cached device, the bucket
 * stays marked dirty and won't be reclaimed, even after the cached device's
 * dirty data has been written back to the backing device.
 *
 * And say you start Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
			continue;
		else if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If wait is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
		       unsigned write_point, unsigned write_prio, bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * pick_data_bucket()'s refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}