/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
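
/*
 * For scale (assuming 4K pages): BITS_PER_BITMAP is 32768, so one bitmap
 * entry covers 32768 * sectorsize bytes, i.e. 128MiB at a 4K sectorsize,
 * and MAX_CACHE_BYTES_PER_GIG caps the tracking overhead at 32K of ram per
 * 1GiB of block group space (see recalculate_thresholds() below).
 */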

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	inode->i_mapping->flags &= ~__GFP_FS;

	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) {
		printk(KERN_INFO "Old style space inode found, converting.\n");
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path, u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
			      BTRFS_INODE_PREALLOC);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	struct btrfs_block_rsv *rsv;
	loff_t oldsize;
	int ret = 0;

	rsv = trans->block_rsv;
	trans->block_rsv = root->orphan_block_rsv;
	ret = btrfs_block_rsv_check(root, root->orphan_block_rsv, 0, 5, 0);
	if (ret)
		return ret;

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);

	trans->block_rsv = rsv;
	if (ret) {
		WARN_ON(1);
		return ret;
	}

	ret = btrfs_update_inode(trans, root, inode);
	return ret;
}

static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}

struct io_ctl {
	void *cur, *orig;
	struct page *page;
	struct page **pages;
	struct btrfs_root *root;
	unsigned long size;
	int index;
	int num_pages;
};
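
/*
 * The io_ctl helpers below stream the cache file through one mapped page
 * at a time: "cur" walks the currently kmap()ed page, "orig" remembers
 * where that page starts, "size" counts the bytes still free in it, and
 * "index"/"num_pages" track our position in the "pages" array.
 */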

static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root)
{
	memset(io_ctl, 0, sizeof(struct io_ctl));
	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
				GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;
	io_ctl->root = root;
	return 0;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		kunmap(io_ctl->page);
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	WARN_ON(io_ctl->cur);
	BUG_ON(io_ctl->index >= io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = kmap(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		ClearPageChecked(io_ctl->pages[i]);
		unlock_page(io_ctl->pages[i]);
		page_cache_release(io_ctl->pages[i]);
	}
}

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	return 0;
}

static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
	u64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the first 64 bits to make sure there's a bogus crc for old
	 * kernels
	 */
	io_ctl->cur += sizeof(u64);

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
	io_ctl->size -= sizeof(u64) * 2;
}

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	u64 *gen;

	io_ctl_map_page(io_ctl, 0);

	/* Skip the bogus crc area */
	io_ctl->cur += sizeof(u64);
	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
				   "(%Lu) does not match inode (%Lu)\n", *gen,
				   generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	io_ctl->size -= sizeof(u64) * 2;
	return 0;
}
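
/*
 * Resulting on-disk layout (as implemented above): page 0 begins with a
 * 64-bit slot holding a deliberately mismatching crc so that older kernels
 * discard the cache, followed by the u64 generation, then packed
 * btrfs_free_space_entry records; each bitmap occupies a full page of its
 * own, written after all of the entries.
 */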

static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	/*
	 * index == 1 means the current page is 0, we need to generate a bogus
	 * crc for older kernels.
	 */
	if (io_ctl->index == 1) {
		u32 *tmp;
		u32 crc = ~(u32)0;

		crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + sizeof(u64),
				      crc, PAGE_CACHE_SIZE - sizeof(u64));
		btrfs_csum_final(crc, (char *)&crc);
		crc++;
		tmp = io_ctl->orig;
		*tmp = crc;
	}
	io_ctl_unmap_page(io_ctl);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_unmap_page(io_ctl);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
	io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_unmap_page(io_ctl);
	}
}

static u8 io_ctl_read_entry(struct io_ctl *io_ctl,
			    struct btrfs_free_space *entry)
{
	struct btrfs_free_space_entry *e;
	u8 type;

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return type;

	io_ctl_unmap_page(io_ctl);

	if (io_ctl->index >= io_ctl->num_pages)
		return type;

	io_ctl_map_page(io_ctl, 0);
	return type;
}

static void io_ctl_read_bitmap(struct io_ctl *io_ctl,
			       struct btrfs_free_space *entry)
{
	BUG_ON(!io_ctl->cur);
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_unmap_page(io_ctl);
		io_ctl_map_page(io_ctl, 0);
	}
	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
}

int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	io_ctl_init(&io_ctl, inode, root);
	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		type = io_ctl_read_entry(&io_ctl, e);
		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			BUG_ON(!num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	/*
	 * Bitmap pages are stored after all the entries, so read them back
	 * last, in the same order their entries were added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		io_ctl_read_bitmap(&io_ctl, e);
	}

	io_ctl_drop_pages(&io_ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has the wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * and -1 if it was not.
 */
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct io_ctl io_ctl;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, end, len;
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int err = -1;

	INIT_LIST_HEAD(&bitmap_list);

	if (!i_size_read(inode))
		return -1;

	filemap_write_and_wait(inode->i_mapping);
	btrfs_wait_ordered_range(inode, inode->i_size &
				 ~(root->sectorsize - 1), (u64)-1);

	io_ctl_init(&io_ctl, inode, root);

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(&io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state, GFP_NOFS);

	/*
	 * When searching for pinned extents, we need to start at our start
	 * offset.
	 */
	if (block_group)
		start = block_group->key.objectid;

	node = rb_first(&ctl->free_space_offset);
	if (!node && cluster) {
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	io_ctl_set_generation(&io_ctl, trans->transid);

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		entries++;

		ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto out_nospc;

		if (e->bitmap) {
			list_add_tail(&e->list, &bitmap_list);
			bitmaps++;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster = NULL;
		}
	}

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 */
	while (block_group && (start < block_group->key.objectid +
			       block_group->key.offset)) {
		ret = find_first_extent_bit(unpin, start, &start, &end,
					    EXTENT_DIRTY);
		if (ret) {
			ret = 0;
			break;
		}

		/* This pinned extent is out of our range */
		if (start >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		len = block_group->key.objectid +
			block_group->key.offset - start;
		len = min(len, end + 1 - start);

		entries++;
		ret = io_ctl_add_entry(&io_ctl, start, len, NULL);
		if (ret)
			goto out_nospc;

		start = end + 1;
	}

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
		if (ret)
			goto out_nospc;
		list_del_init(&entry->list);
	}

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(&io_ctl);

	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret)
		goto out;

	BTRFS_I(inode)->generation = trans->transid;

	filemap_write_and_wait(inode->i_mapping);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
		goto out;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC |
					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
					 GFP_NOFS);
			btrfs_release_path(path);
			goto out;
		}
	}
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	err = 0;
out:
	io_ctl_free(&io_ctl);
	if (err) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return err;

out_nospc:
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
	goto out;
}

int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;
#ifdef DEBUG
		printk(KERN_ERR "btrfs: failed to write free space cache "
		       "for block group %llu\n", block_group->key.objectid);
#endif
	}

	iput(inode);
	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
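
/*
 * Worked example, assuming 4K pages and a 4K sectorsize unit:
 * BITS_PER_BITMAP is 32768, so bytes_per_bitmap is 32768 * 4096 == 128MiB,
 * and offset_to_bitmap() rounds "offset" down to the nearest 128MiB
 * boundary relative to ctl->start.
 */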

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
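
/*
 * Example of the fuzzy case above, with hypothetical tree contents: given
 * an extent entry covering [0, 8K) and a bitmap entry starting at 128M,
 * tree_search_offset(ctl, 64K, 0, 1) finds nothing covering 64K and so
 * returns the bitmap at 128M, the first entry at or after the requested
 * offset.
 */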

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
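
/*
 * Rough numbers for the above, assuming 4K pages: a 1GiB block group gets
 * max_bytes == 32K; with no bitmaps yet, bitmap_bytes == 1 * 4K, so
 * extent_bytes is capped at 16K and extents_thresh works out to
 * 16K / sizeof(struct btrfs_free_space) entries.
 */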

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
1285
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1286 1287 1288 1289 1290 1291 1292
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

1293
	i = offset_to_bit(bitmap_info->offset, ctl->unit,
1294
			  max_t(u64, *offset, bitmap_info->offset));
1295
	bits = bytes_to_bits(*bytes, ctl->unit);
1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
1310 1311
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
1312 1313 1314 1315 1316 1317
		return 0;
	}

	return -1;
}
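
/*
 * For scale, assuming a 4K unit: a request of *bytes == 1MiB asks
 * search_bitmap() for a run of 256 consecutive set bits; on success,
 * *offset and *bytes are rewritten to describe the run that was found.
 */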

static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *bitmap_info,
			      u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case something's not
	 * working.  Search the bitmap for the space we are trying to use to
	 * make sure it's actually there.  If it's not there then we need to
	 * stop because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}

static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;

}

static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		      struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}

static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
1596 1597
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
1598
			if (!info) {
1599
				spin_lock(&ctl->tree_lock);
1600 1601 1602 1603 1604 1605 1606
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
1607
		spin_lock(&ctl->tree_lock);
1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
1619
		kmem_cache_free(btrfs_free_space_cachep, info);
1620
	}
J
Josef Bacik 已提交
1621 1622 1623 1624

	return ret;
}

static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * If there was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}

int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&ctl->tree_lock);

again:
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			WARN_ON(1);
			goto out_lock;
		}
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;
		next_info = rb_entry(rb_next(&info->offset_index),
				     struct btrfs_free_space,
				     offset_index);

		if (next_info->bitmap)
			end = next_info->offset +
			      BITS_PER_BITMAP * ctl->unit - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			      " trying to use %llu\n",
			      (unsigned long long)info->offset,
			      (unsigned long long)info->bytes,
			      (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(ctl, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			ctl->total_bitmaps--;
		}
		kmem_cache_free(btrfs_free_space_cachep, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(ctl, info);
		info->offset += bytes;
		info->bytes -= bytes;
		link_free_space(ctl, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(ctl, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kmem_cache_free(btrfs_free_space_cachep, info);
		}
		spin_unlock(&ctl->tree_lock);

		/* step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret);
		goto out;
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret);
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes\n",
	       count);
}

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
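	/*
	 * A rough sketch of the arithmetic (entry size is illustrative,
	 * since sizeof(struct btrfs_free_space) depends on arch and
	 * config): with a 64-byte entry, (32k / 2) / 64 = 256 extent
	 * entries fit in half the budget before bitmaps start looking
	 * attractive.
	 */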
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap)
			try_merge_free_space(ctl, entry, false);
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}

void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}

out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it, don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}

static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = min_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err)
		return 0;

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      min_start);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}

static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long search_bits;
	unsigned long total_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;
	bool found = false;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	search_bits = bytes_to_bits(bytes, block_group->sectorsize);
	total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
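	/*
	 * What follows is a sliding-window scan over the bitmap, sketched
	 * here with illustrative numbers: with 4k sectors, bytes = 64k and
	 * min_bytes = 256k, we need runs of at least 16 set bits and a
	 * window totalling 64 bits; if the window spreads over more than
	 * twice that span, it is reset and the scan starts over.
	 */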

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= search_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!found) {
		start = i;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < total_bits) {
		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
		if (i - start > total_bits * 2) {
			total_found = 0;
			cluster->max_size = 0;
			found = false;
		}
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	BUG_ON(ret);

	return 0;
}

/*
 * This searches the block group for just extents to fill the cluster with.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *prev = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 max_gap = 128 * 1024;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap) {
		if (list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;
	prev = entry;

	while (window_free <= min_bytes) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		/*
		 * we haven't filled the empty size and the window is
		 * very large.  reset and try again
		 */
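		/*
		 * Illustrative numbers (not from the source): with
		 * min_bytes = 256k, a gap of more than 128k between
		 * neighbouring extents, or a window stretching past 512k
		 * from window_start, restarts the window at this entry.
		 */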
		if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
		    entry->offset - window_start > (min_bytes * 2)) {
			first = entry;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = entry->bytes;
		} else {
			last = entry;
			window_free += entry->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
		prev = entry;
	}

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		BUG_ON(ret);
	} while (node && entry != last);

	cluster->max_size = max_extent;

	return 0;
}

/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = -ENOSPC;

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * First check our cached list of bitmaps and see if there is an entry
	 * here that will work.
	 */
	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * If we do have entries on our list and we are here then we didn't find
	 * anything, so go ahead and get the next entry after the last entry in
	 * this list and start the search from there.
	 */
	if (!list_empty(bitmaps)) {
		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
				   list);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		goto search;
	}

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
	if (!entry)
		return -ENOSPC;

search:
	node = &entry->offset_index;
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (!entry->bitmap)
			continue;
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
	} while (ret && node);

	return ret;
}

/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct list_head bitmaps;
	struct btrfs_free_space *entry, *tmp;
	u64 min_bytes;
	int ret;

	/* for metadata, allow allocations with more holes */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);
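	/*
	 * For instance (numbers purely illustrative): asking for bytes = 1M
	 * with empty_size = 3M gives min_bytes = max(1M, 4M >> 2) = 1M for
	 * data, while metadata under delayed-ref flushing would use
	 * max(1M, 4M >> 1) = 2M.
	 */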

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < min_bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	INIT_LIST_HEAD(&bitmaps);
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}

int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 bytes = 0;
	u64 actually_trimmed;
	int ret = 0;

	*trimmed = 0;
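
	/*
	 * Outline of each pass through the loop below (a sketch, not part
	 * of the original comments): find the free-space entry covering
	 * 'start', clip it to [start, end), pull those bytes out of the
	 * cache, issue the discard without the tree lock held, and then
	 * hand the bytes back to the cache.
	 */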

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			entry = tree_search_offset(ctl,
						   offset_to_bitmap(ctl, start),
						   1, 1);

		if (!entry || entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, &start, &bytes);
			if (!ret) {
				if (start >= end) {
					spin_unlock(&ctl->tree_lock);
					break;
				}
				bytes = min(bytes, end - start);
				bitmap_clear_bits(ctl, entry, start, bytes);
				if (entry->bytes == 0)
					free_bitmap(ctl, entry);
			} else {
				start = entry->offset + BITS_PER_BITMAP *
					block_group->sectorsize;
				spin_unlock(&ctl->tree_lock);
				ret = 0;
				continue;
			}
		} else {
			start = entry->offset;
			bytes = min(entry->bytes, end - start);
			unlink_free_space(ctl, entry);
			kmem_cache_free(btrfs_free_space_cachep, entry);
		}

		spin_unlock(&ctl->tree_lock);

		if (bytes >= minlen) {
			struct btrfs_space_info *space_info;
			int update = 0;

			space_info = block_group->space_info;
			spin_lock(&space_info->lock);
			spin_lock(&block_group->lock);
			if (!block_group->ro) {
				block_group->reserved += bytes;
				space_info->bytes_reserved += bytes;
				update = 1;
			}
			spin_unlock(&block_group->lock);
			spin_unlock(&space_info->lock);

			ret = btrfs_error_discard_extent(fs_info->extent_root,
							 start,
							 bytes,
							 &actually_trimmed);

			btrfs_add_free_space(block_group, start, bytes);
			if (update) {
				spin_lock(&space_info->lock);
				spin_lock(&block_group->lock);
				if (block_group->ro)
					space_info->bytes_readonly += bytes;
				block_group->reserved -= bytes;
				space_info->bytes_reserved -= bytes;
				spin_unlock(&space_info->lock);
				spin_unlock(&block_group->lock);
			}

			if (ret)
				break;
			*trimmed += actually_trimmed;
		}
		start += bytes;
		bytes = 0;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}

/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
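 *
 * For instance (hypothetical layout): a bitmap entry at offset 0 whose
 * first set bit maps to ino 4096 sorts before an extent entry starting
 * at ino 100, so this returns 4096 even though 100 is free.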
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		BUG_ON(ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}

struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->cache_lock);
	if (root->cache_inode)
		inode = igrab(root->cache_inode);
	spin_unlock(&root->cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->cache_inode = igrab(inode);
	spin_unlock(&root->cache_lock);

	return inode;
}

int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}

int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		printk(KERN_ERR "btrfs: failed to load free ino cache for "
		       "root %llu\n", root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct inode *inode;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
		printk(KERN_ERR "btrfs: failed to write free ino cache "
		       "for root %llu\n", root->root_key.objectid);
#endif
	}

	iput(inode);
	return ret;
}