/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include "util.h"
#include "bset.h"

#include <linux/console.h>
#include <linux/random.h>
#include <linux/prefetch.h>

#ifdef CONFIG_BCACHE_DEBUG

void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
{
	struct bkey *k, *next;

	for (k = i->start; k < bset_bkey_last(i); k = next) {
		next = bkey_next(k);

		printk(KERN_ERR "block %u key %li/%u: ", set,
		       (uint64_t *) k - i->d, i->keys);

		if (b->ops->key_dump)
			b->ops->key_dump(b, k);
		else
			printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

		if (next < bset_bkey_last(i) &&
		    bkey_cmp(k, b->ops->is_extents ?
			     &START_KEY(next) : next) > 0)
			printk(KERN_ERR "Key skipped backwards\n");
	}
}

void bch_dump_bucket(struct btree_keys *b)
{
	unsigned i;

	console_lock();
	for (i = 0; i <= b->nsets; i++)
		bch_dump_bset(b, b->set[i].data,
			      bset_sector_offset(b, b->set[i].data));
	console_unlock();
}

int __bch_count_data(struct btree_keys *b)
{
	unsigned ret = 0;
	struct btree_iter iter;
	struct bkey *k;

	if (b->ops->is_extents)
		for_each_key(b, k, &iter)
			ret += KEY_SIZE(k);
	return ret;
}

void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
	va_list args;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;
	const char *err;

	for_each_key(b, k, &iter) {
		if (b->ops->is_extents) {
			err = "Keys out of order";
			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
				goto bug;

			if (bch_ptr_invalid(b, k))
				continue;

			err =  "Overlapping keys";
			if (p && bkey_cmp(p, &START_KEY(k)) > 0)
				goto bug;
		} else {
			if (bch_ptr_bad(b, k))
				continue;

			err = "Duplicate keys";
			if (p && !bkey_cmp(p, k))
				goto bug;
		}
		p = k;
	}
#if 0
	err = "Key larger than btree node key";
	if (p && bkey_cmp(p, &b->key) > 0)
		goto bug;
#endif
	return;
bug:
	bch_dump_bucket(b);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	panic("bch_check_keys error:  %s:\n", err);
}

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
	struct bkey *k = iter->data->k, *next = bkey_next(k);

	if (next < iter->data->end &&
	    bkey_cmp(k, iter->b->ops->is_extents ?
		     &START_KEY(next) : next) > 0) {
		bch_dump_bucket(iter->b);
		panic("Key skipped backwards\n");
	}
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif

/* Keylists */

int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;
	uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
	uint64_t *new_keys;

	newsize = roundup_pow_of_two(newsize);

	if (newsize <= KEYLIST_INLINE ||
	    roundup_pow_of_two(oldsize) == newsize)
		return 0;

	new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

	if (!new_keys)
		return -ENOMEM;

	if (!old_keys)
		memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

	l->keys_p = new_keys;
	l->top_p = new_keys + oldsize;

	return 0;
}

struct bkey *bch_keylist_pop(struct keylist *l)
{
	struct bkey *k = l->keys;

	if (k == l->top)
		return NULL;

	while (bkey_next(k) != l->top)
		k = bkey_next(k);

	return l->top = k;
}

void bch_keylist_pop_front(struct keylist *l)
{
	l->top_p -= bkey_u64s(l->keys);

	memmove(l->keys,
		bkey_next(l->keys),
		bch_keylist_bytes(l));
}

/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned i)
{
	BUG_ON(i > KEY_PTRS(src));

	/* Only copy the header, key, and one pointer. */
	memcpy(dest, src, 2 * sizeof(uint64_t));
	dest->ptr[0] = src->ptr[i];
	SET_KEY_PTRS(dest, 1);
	/* We didn't copy the checksum so clear that bit. */
	SET_KEY_CSUM(dest, 0);
}

bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
	unsigned i, len = 0;

	if (bkey_cmp(where, &START_KEY(k)) <= 0)
		return false;

	if (bkey_cmp(where, k) < 0)
		len = KEY_OFFSET(k) - KEY_OFFSET(where);
	else
		bkey_copy_key(k, where);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}

bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
	unsigned len = 0;

	if (bkey_cmp(where, k) >= 0)
		return false;

	BUG_ON(KEY_INODE(where) != KEY_INODE(k));

	if (bkey_cmp(where, &START_KEY(k)) > 0)
		len = KEY_OFFSET(where) - KEY_START(k);

	bkey_copy_key(k, where);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}
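
/*
 * A worked example (editorial note, not from the original source): for an
 * extent key k with KEY_INODE == 5, KEY_OFFSET == 100 and KEY_SIZE == 40
 * (covering 60..100), __bch_cut_front() at a cut point of offset 80 bumps
 * each PTR_OFFSET by 20 and leaves k covering 80..100 with KEY_SIZE == 20,
 * while __bch_cut_back() at offset 80 instead leaves k covering 60..80.
 */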

/* Auxiliary search trees */

/* 32 bits total: */
#define BKEY_MID_BITS		3
#define BKEY_EXPONENT_BITS	7
#define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)

struct bkey_float {
	unsigned	exponent:BKEY_EXPONENT_BITS;
	unsigned	m:BKEY_MID_BITS;
	unsigned	mantissa:BKEY_MANTISSA_BITS;
} __packed;

/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bset_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE		128
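
/*
 * For intuition (editorial note, derived from the numbers above): each
 * bkey_float thus covers 128 bytes, i.e. 16 u64s of key data, so the final
 * linear search touches at most about two 64-byte hardware cachelines.
 */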

/* Space required for the btree node keys */
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
	return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
	return btree_keys_bytes(b) / BSET_CACHELINE;
}

/* Space required for the auxiliary search trees */

static inline size_t bset_tree_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}

/* Space required for the prev pointers */

static inline size_t bset_prev_bytes(struct btree_keys *b)
{
	return btree_keys_cachelines(b) * sizeof(uint8_t);
}

/* Memory allocation */

void bch_btree_keys_free(struct btree_keys *b)
{
	struct bset_tree *t = b->set;

	if (bset_prev_bytes(b) < PAGE_SIZE)
		kfree(t->prev);
	else
		free_pages((unsigned long) t->prev,
			   get_order(bset_prev_bytes(b)));

	if (bset_tree_bytes(b) < PAGE_SIZE)
		kfree(t->tree);
	else
		free_pages((unsigned long) t->tree,
			   get_order(bset_tree_bytes(b)));

	free_pages((unsigned long) t->data, b->page_order);

	t->prev = NULL;
	t->tree = NULL;
	t->data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_free);

int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
{
	struct bset_tree *t = b->set;

	BUG_ON(t->data);

	b->page_order = page_order;

	t->data = (void *) __get_free_pages(gfp, b->page_order);
	if (!t->data)
		goto err;

	t->tree = bset_tree_bytes(b) < PAGE_SIZE
		? kmalloc(bset_tree_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
	if (!t->tree)
		goto err;

	t->prev = bset_prev_bytes(b) < PAGE_SIZE
		? kmalloc(bset_prev_bytes(b), gfp)
		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
	if (!t->prev)
		goto err;

	return 0;
err:
	bch_btree_keys_free(b);
	return -ENOMEM;
}
EXPORT_SYMBOL(bch_btree_keys_alloc);

void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
			 bool *expensive_debug_checks)
{
	unsigned i;

	b->ops = ops;
	b->expensive_debug_checks = expensive_debug_checks;
	b->nsets = 0;
	b->last_set_unwritten = 0;

	/* XXX: shouldn't be needed */
	for (i = 0; i < MAX_BSETS; i++)
		b->set[i].size = 0;
	/*
	 * Second loop starts at 1 because b->keys[0]->data is the memory we
	 * allocated
	 */
	for (i = 1; i < MAX_BSETS; i++)
		b->set[i].data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_init);

/* Binary tree stuff for auxiliary search trees */

static unsigned inorder_next(unsigned j, unsigned size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;

		while (j * 2 < size)
			j *= 2;
	} else
		j >>= ffz(j) + 1;

	return j;
}

static unsigned inorder_prev(unsigned j, unsigned size)
{
	if (j * 2 < size) {
		j = j * 2;

		while (j * 2 + 1 < size)
			j = j * 2 + 1;
	} else
		j >>= ffs(j);

	return j;
}

/* I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, size up to size somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *   extra = (size - rounddown_pow_of_two(size - 1)) << 1;
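 *
 * A worked example (editorial, easily checked by hand): for size == 7,
 * extra == (7 - 4) << 1 == 6, and tree index -> inorder index comes out as
 * 1->4, 2->2, 3->6, 4->1, 5->3, 6->5, 7->7 - exactly an inorder walk of a
 * complete 7-node heap.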
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
	unsigned b = fls(j);
	unsigned shift = fls(size - 1) - b;

	j  ^= 1U << (b - 1);
	j <<= 1;
	j  |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;

	return j;
}

static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}

static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
	unsigned shift;

	if (j > extra)
		j += j - extra;

	shift = ffs(j);

	j >>= shift;
	j  |= roundup_pow_of_two(size) >> shift;

	return j;
}

static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
	return __inorder_to_tree(j, t->size, t->extra);
}

#if 0
void inorder_test(void)
{
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned size = 2;
	     size < 65536000;
	     size++) {
		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned i = 1, j = rounddown_pow_of_two(size - 1);

		if (!(size % 4096))
			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
			       done / ktime_us_delta(ktime_get(), start));

		while (1) {
			if (__inorder_to_tree(i, size, extra) != j)
				panic("size %10u j %10u i %10u", size, j, i);

			if (__to_inorder(j, size, extra) != i)
				panic("size %10u j %10u i %10u", size, j, i);

			if (j == rounddown_pow_of_two(size) - 1)
				break;

			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

			j = inorder_next(j, size);
			i++;
		}

		done += size - 1;
	}
}
#endif

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
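 *
 * Concretely (editorial example, derived from the definitions below): with
 * BSET_CACHELINE == 128, cacheline_to_bkey(t, 2, 3) points 2 * 128 + 3 * 8
 * == 280 bytes past t->data, and bkey_to_cacheline_offset() recovers the 3
 * from such a pointer.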
 */

static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
				      unsigned offset)
{
	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
					 unsigned cacheline,
					 struct bkey *k)
{
	return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
{
	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
{
	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
{
	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}
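
/*
 * Editorial note, not from the original source: shrd128() is a 128-bit
 * funnel shift in the spirit of x86 SHRD - it returns the 64 bits of
 * high:low starting at bit `shift', for 0 <= shift <= 63. Writing the
 * second shift as (high << 1) << (63U - shift) rather than high << (64 -
 * shift) avoids an undefined shift by 64 when shift == 0.
 */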

static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
	low >>= shift;
	low  |= (high << 1) << (63U - shift);
	return low;
}

static inline unsigned bfloat_mantissa(const struct bkey *k,
				       struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);
	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}

static void make_bfloat(struct bset_tree *t, unsigned j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
	struct bkey *p = tree_to_prev_bkey(t, j);

	struct bkey *l = is_power_of_2(j)
		? t->data->start
		: tree_to_prev_bkey(t, j >> ffs(j));

	struct bkey *r = is_power_of_2(j + 1)
		? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
		: tree_to_bkey(t, j >> (ffz(j) + 1));

	BUG_ON(m < l || m > r);
	BUG_ON(bkey_next(p) != m);

	if (KEY_INODE(l) != KEY_INODE(r))
		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
	else
		f->exponent = fls64(r->low ^ l->low);

	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

	/*
	 * Setting f->exponent = 127 flags this node as failed, and causes the
	 * lookup code to fall back to comparing against the original key.
	 */

	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
		f->mantissa = bfloat_mantissa(m, f) - 1;
	else
		f->exponent = 127;
}
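
/*
 * Editorial example (not in the original comments): with BKEY_MANTISSA_BITS
 * == 22, if l and r agree on KEY_INODE and their ->low words first differ
 * at bit 29, then fls64(r->low ^ l->low) == 30, f->exponent == 30 - 22 == 8,
 * and bfloat_mantissa() compares the 22 bits starting at bit 8.
 */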

static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
	if (t != b->set) {
		unsigned j = roundup(t[-1].size,
				     64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
		t->prev = t[-1].prev + j;
	}

	while (t < b->set + MAX_BSETS)
		t++->size = 0;
}

static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(b->last_set_unwritten);
	b->last_set_unwritten = 1;

	bset_alloc_tree(b, t);

	if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
		t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
		t->size = 1;
	}
}

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
	if (i != b->set->data) {
		b->set[++b->nsets].data = i;
		i->seq = b->set->data->seq;
	} else
		get_random_bytes(&i->seq, sizeof(uint64_t));

	i->magic	= magic;
	i->version	= 0;
	i->keys		= 0;

	bch_bset_build_unwritten_tree(b);
}
EXPORT_SYMBOL(bch_bset_init_next);

void bch_bset_build_written_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct bkey *prev = NULL, *k = t->data->start;
	unsigned j, cacheline = 1;

	b->last_set_unwritten = 0;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned,
			bkey_to_cacheline(t, bset_bkey_last(t->data)),
			b->set->tree + btree_keys_cachelines(b) - t->tree);

	if (t->size < 2) {
		t->size = 0;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size)) {
		while (bkey_to_cacheline(t, k) < cacheline)
			prev = k, k = bkey_next(k);

		t->prev[j] = bkey_u64s(prev);
		t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
	}

	while (bkey_next(k) != bset_bkey_last(t->data))
		k = bkey_next(k);

	t->end = *k;

	/* Then we build the tree */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size))
		make_bfloat(t, j);
}
EXPORT_SYMBOL(bch_bset_build_written_tree);

/* Insert */

void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned inorder, j = 1;

	for (t = b->set; t <= bset_tree_last(b); t++)
		if (k < bset_bkey_last(t->data))
			goto found_set;

	BUG();
found_set:
	if (!t->size || !bset_written(b, t))
		return;

	inorder = bkey_to_cacheline(t, k);

	if (k == t->data->start)
		goto fix_left;

	if (bkey_next(k) == bset_bkey_last(t->data)) {
		t->end = *k;
		goto fix_right;
	}

	j = inorder_to_tree(inorder, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_bkey(t, j))
fix_left:	do {
			make_bfloat(t, j);
			j = j * 2;
		} while (j < t->size);

	j = inorder_to_tree(inorder + 1, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_prev_bkey(t, j))
fix_right:	do {
			make_bfloat(t, j);
			j = j * 2 + 1;
		} while (j < t->size);
}
EXPORT_SYMBOL(bch_bset_fix_invalidated_key);

static void bch_bset_fix_lookup_table(struct btree_keys *b,
				      struct bset_tree *t,
				      struct bkey *k)
{
	unsigned shift = bkey_u64s(k);
	unsigned j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/* k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
	while (j < t->size &&
	       table_to_bkey(t, j) <= k)
		j++;

	/* Adjust all the lookup table entries, and find a new key for any that
	 * have gotten too big
	 */
	for (; j < t->size; j++) {
		t->prev[j] += shift;

		if (t->prev[j] > 7) {
			k = table_to_bkey(t, j - 1);

			while (k < cacheline_to_bkey(t, j, 0))
				k = bkey_next(k);

			t->prev[j] = bkey_to_cacheline_offset(t, j, k);
		}
	}

	if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
		return;

	/* Possibly add a new entry to the end of the lookup table */

	for (k = table_to_bkey(t, t->size - 1);
	     k != bset_bkey_last(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
			t->size++;
		}
}

/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
	if (!b->ops->key_merge)
		return false;

	/*
	 * Generic header checks
	 * Assumes left and right are in order
	 * Left and right must be exactly aligned
	 */
	if (!bch_bkey_equal_header(l, r) ||
	     bkey_cmp(l, &START_KEY(r)))
		return false;

	return b->ops->key_merge(b, l, r);
}
EXPORT_SYMBOL(bch_bkey_try_merge);
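
/*
 * Editorial illustration (not from the original source): for extent keys,
 * "mergeable" means l ends exactly where r starts - e.g. two extents
 * covering 60..80 and 80..100 with identical headers can merge into one
 * covering 60..100, provided ops->key_merge can also splice their pointers.
 */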

void bch_bset_insert(struct btree_keys *b, struct bkey *where,
		     struct bkey *insert)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON(!b->last_set_unwritten);
	BUG_ON(bset_byte_offset(b, t->data) +
	       __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
	       PAGE_SIZE << b->page_order);

	memmove((uint64_t *) where + bkey_u64s(insert),
		where,
		(void *) bset_bkey_last(t->data) - (void *) where);

	t->data->keys += bkey_u64s(insert);
	bkey_copy(where, insert);
	bch_bset_fix_lookup_table(b, t, where);
}
EXPORT_SYMBOL(bch_bset_insert);

unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
			      struct bkey *replace_key)
{
	unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
	struct bset *i = bset_tree_last(b)->data;
	struct bkey *m, *prev = NULL;
	struct btree_iter iter;

	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

	m = bch_btree_iter_init(b, &iter, b->ops->is_extents
				? PRECEDING_KEY(&START_KEY(k))
				: PRECEDING_KEY(k));

	if (b->ops->insert_fixup(b, k, &iter, replace_key))
		return status;

	status = BTREE_INSERT_STATUS_INSERT;

	while (m != bset_bkey_last(i) &&
	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
		prev = m, m = bkey_next(m);

	/* prev is in the tree, if we merge we're done */
	status = BTREE_INSERT_STATUS_BACK_MERGE;
	if (prev &&
	    bch_bkey_try_merge(b, prev, k))
		goto merged;
#if 0
	status = BTREE_INSERT_STATUS_OVERWROTE;
	if (m != bset_bkey_last(i) &&
	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
		goto copy;
#endif
	status = BTREE_INSERT_STATUS_FRONT_MERGE;
	if (m != bset_bkey_last(i) &&
	    bch_bkey_try_merge(b, k, m))
		goto copy;

	bch_bset_insert(b, m, k);
copy:	bkey_copy(m, k);
merged:
	return status;
}
EXPORT_SYMBOL(bch_btree_insert_key);

/* Lookup */

struct bset_search_iter {
	struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned li = 0, ri = t->size;

	while (li + 1 != ri) {
		unsigned m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
		else
			li = m;
	}

	return (struct bset_search_iter) {
		table_to_bkey(t, li),
		ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
	};
}

static struct bset_search_iter bset_search_tree(struct bset_tree *t,
						const struct bkey *search)
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned inorder, j, n = 1;

	do {
		unsigned p = n << 4;
		p &= ((int) (p - t->size)) >> 31;

		prefetch(&t->tree[p]);

		j = n;
		f = &t->tree[j];

		/*
		 * n = (f->mantissa > bfloat_mantissa())
		 *	? j * 2
		 *	: j * 2 + 1;
		 *
		 * We need to subtract 1 from f->mantissa for the sign bit trick
		 * to work  - that's done in make_bfloat()
		 */
		if (likely(f->exponent != 127))
			n = j * 2 + (((unsigned)
				      (f->mantissa -
				       bfloat_mantissa(search, f))) >> 31);
		else
			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
				? j * 2
				: j * 2 + 1;
	} while (n < t->size);

	inorder = to_inorder(j, t);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (n & 1) {
		l = cacheline_to_bkey(t, inorder, f->m);

		if (++inorder != t->size) {
			f = &t->tree[inorder_next(j, t->size)];
			r = cacheline_to_bkey(t, inorder, f->m);
		} else
			r = bset_bkey_last(t->data);
	} else {
		r = cacheline_to_bkey(t, inorder, f->m);

		if (--inorder) {
			f = &t->tree[inorder_prev(j, t->size)];
			l = cacheline_to_bkey(t, inorder, f->m);
		} else
			l = t->data->start;
	}

	return (struct bset_search_iter) {l, r};
}

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
			       const struct bkey *search)
{
	struct bset_search_iter i;

	/*
	 * First, we search for a cacheline, then lastly we do a linear search
	 * within that cacheline.
	 *
	 * To search for the cacheline, there's three different possibilities:
	 *  * The set is too small to have a search tree, so we just do a linear
	 *    search over the whole set.
	 *  * The set is the one we're currently inserting into; keeping a full
	 *    auxiliary search tree up to date would be too expensive, so we
	 *    use a much simpler lookup table to do a binary search -
	 *    bset_search_write_set().
	 *  * Or we use the auxiliary search tree we constructed earlier -
	 *    bset_search_tree()
	 */

	if (unlikely(!t->size)) {
		i.l = t->data->start;
		i.r = bset_bkey_last(t->data);
	} else if (bset_written(b, t)) {
		/*
		 * Each node in the auxiliary search tree covers a certain range
		 * of bits, and keys above and below the set it covers might
		 * differ outside those bits - so we have to special case the
		 * start and end - handle that here:
		 */

		if (unlikely(bkey_cmp(search, &t->end) >= 0))
			return bset_bkey_last(t->data);

		if (unlikely(bkey_cmp(search, t->data->start) < 0))
			return t->data->start;

		i = bset_search_tree(t, search);
	} else {
		BUG_ON(!b->nsets &&
		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

		i = bset_search_write_set(t, search);
	}

	if (btree_keys_expensive_checks(b)) {
		BUG_ON(bset_written(b, t) &&
		       i.l != t->data->start &&
		       bkey_cmp(tree_to_prev_bkey(t,
			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
				search) > 0);

		BUG_ON(i.r != bset_bkey_last(t->data) &&
		       bkey_cmp(i.r, search) <= 0);
	}

	while (likely(i.l != i.r) &&
	       bkey_cmp(i.l, search) <= 0)
		i.l = bkey_next(i.l);

	return i.l;
}
EXPORT_SYMBOL(__bch_bset_search);

/* Btree iterator */

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
				 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
	return bkey_cmp(l.k, r.k) > 0;
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
	return !iter->used;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end)
{
	if (k != end)
		BUG_ON(!heap_add(iter,
				 ((struct btree_iter_set) { k, end }),
				 btree_iter_cmp));
}

static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
					  struct btree_iter *iter,
					  struct bkey *search,
					  struct bset_tree *start)
{
	struct bkey *ret = NULL;
	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = b;
#endif

	for (; start <= bset_tree_last(b); start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
	}

	return ret;
}

struct bkey *bch_btree_iter_init(struct btree_keys *b,
				 struct btree_iter *iter,
				 struct bkey *search)
{
	return __bch_btree_iter_init(b, iter, search, b->set);
}
EXPORT_SYMBOL(bch_btree_iter_init);

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
						 btree_iter_cmp_fn *cmp)
{
	struct btree_iter_set unused;
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		bch_btree_iter_next_check(iter);

		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

		if (iter->data->k > iter->data->end) {
			WARN_ONCE(1, "bset was corrupt!\n");
			iter->data->k = iter->data->end;
		}

		if (iter->data->k == iter->data->end)
			heap_pop(iter, unused, cmp);
		else
			heap_sift(iter, 0, cmp);
	}

	return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
	return __bch_btree_iter_next(iter, btree_iter_cmp);
}
EXPORT_SYMBOL(bch_btree_iter_next);

struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree_keys *b, ptr_filter_fn fn)
{
	struct bkey *ret;

	do {
		ret = bch_btree_iter_next(iter);
	} while (ret && fn(b, ret));

	return ret;
}

/* Mergesort */

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
	if (state->pool)
		mempool_destroy(state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
{
	spin_lock_init(&state->time.lock);

	state->page_order = page_order;
	state->crit_factor = int_sqrt(1 << page_order);

	state->pool = mempool_create_page_pool(1, page_order);
	if (!state->pool)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(bch_bset_sort_state_init);

static void btree_mergesort(struct btree_keys *b, struct bset *out,
			    struct btree_iter *iter,
			    bool fixup, bool remove_stale)
{
	int i;
	struct bkey *k, *last = NULL;
	BKEY_PADDED(k) tmp;
	bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
		? bch_ptr_bad
		: bch_ptr_invalid;

	/* Heapify the iterator, using our comparison function */
	for (i = iter->used / 2 - 1; i >= 0; --i)
		heap_sift(iter, i, b->ops->sort_cmp);

	while (!btree_iter_end(iter)) {
		if (b->ops->sort_fixup && fixup)
			k = b->ops->sort_fixup(iter, &tmp.k);
		else
			k = NULL;

		if (!k)
			k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

		if (bad(b, k))
			continue;

		if (!last) {
			last = out->start;
			bkey_copy(last, k);
		} else if (!bch_bkey_try_merge(b, last, k)) {
			last = bkey_next(last);
			bkey_copy(last, k);
		}
	}

	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

	pr_debug("sorted %i keys", out->keys);
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
			 unsigned start, unsigned order, bool fixup,
			 struct bset_sort_state *state)
{
	uint64_t start_time;
	bool used_mempool = false;
	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
						     order);
	if (!out) {
		struct page *outp;

		BUG_ON(order > state->page_order);

		outp = mempool_alloc(state->pool, GFP_NOIO);
		out = page_address(outp);
		used_mempool = true;
		order = state->page_order;
	}

	start_time = local_clock();

	btree_mergesort(b, out, iter, fixup, false);
	b->nsets = start;

	if (!start && order == b->page_order) {
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */

		out->magic	= b->set->data->magic;
		out->seq	= b->set->data->seq;
		out->version	= b->set->data->version;
		swap(out, b->set->data);
	} else {
		b->set[start].data->keys = out->keys;
		memcpy(b->set[start].data->start, out->start,
		       (void *) bset_bkey_last(out) - (void *) out->start);
	}

	if (used_mempool)
		mempool_free(virt_to_page(out), state->pool);
	else
		free_pages((unsigned long) out, order);

	bch_bset_build_written_tree(b);

	if (!start)
		bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
			    struct bset_sort_state *state)
{
	size_t order = b->page_order, keys = 0;
	struct btree_iter iter;
	int oldsize = bch_count_data(b);

	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

	if (start) {
		unsigned i;

		for (i = start; i <= b->nsets; i++)
			keys += b->set[i].data->keys;

		order = get_order(__set_bytes(b->set->data, keys));
	}

	__btree_sort(b, &iter, start, order, false, state);

	EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
EXPORT_SYMBOL(bch_btree_sort_partial);

void bch_btree_sort_and_fix_extents(struct btree_keys *b,
				    struct btree_iter *iter,
				    struct bset_sort_state *state)
{
	__btree_sort(b, iter, 0, b->page_order, true, state);
}

void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
			 struct bset_sort_state *state)
{
	uint64_t start_time = local_clock();

	struct btree_iter iter;
	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->set->data, &iter, false, true);

	bch_time_stats_update(&state->time, start_time);

	new->set->size = 0; // XXX: why?
}

#define SORT_CRIT	(4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
	unsigned crit = SORT_CRIT;
	int i;

	/* Don't sort if nothing to do */
	if (!b->nsets)
		goto out;

	for (i = b->nsets - 1; i >= 0; --i) {
		crit *= state->crit_factor;

		if (b->set[i].data->keys < crit) {
			bch_btree_sort_partial(b, i, state);
			return;
		}
	}

	/* Sort if we'd overflow */
	if (b->nsets + 1 == MAX_BSETS) {
		bch_btree_sort(b, state);
		return;
	}

out:
	bch_bset_build_written_tree(b);
}
EXPORT_SYMBOL(bch_btree_sort_lazy);

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
	unsigned i;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->set[i];
		size_t bytes = t->data->keys * sizeof(uint64_t);
		size_t j;

		if (bset_written(b, t)) {
			stats->sets_written++;
			stats->bytes_written += bytes;

			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				if (t->tree[j].exponent == 127)
					stats->failed++;
		} else {
			stats->sets_unwritten++;
			stats->bytes_unwritten += bytes;
		}
	}
}