// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#include "volumes.h"
#include "space-info.h"
#include "delalloc-space.h"
#include "block-group.h"

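/*
 * Each bitmap entry is backed by a single page, so with the common 4KiB
 * page size a bitmap holds 4096 * 8 = 32768 bits.  Every bit covers one
 * ctl->unit (the filesystem sectorsize, typically 4KiB), so a single
 * bitmap entry then tracks 128MiB of block group space.
 */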
#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_32K

struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_io_ctl *io_ctl,
			     struct btrfs_path *path);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	unsigned nofs_flag;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	/*
	 * We are often under a trans handle at this point, so we need to make
	 * sure NOFS is set to keep us from deadlocking.
	 */
	nofs_flag = memalloc_nofs_save();
	inode = btrfs_iget_path(fs_info->sb, &location, root, path);
	btrfs_release_path(path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(inode))
		return inode;

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}

struct inode *lookup_free_space_inode(
		struct btrfs_block_group_cache *block_group,
		struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(fs_info->tree_root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(fs_info, "Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memzero_extent_buffer(leaf, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(trans->fs_info->tree_root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
					 ino, block_group->key.objectid);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

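/*
 * Throw away the contents of a free space cache inode: wait for any cache
 * IO still in flight, truncate the cache file to zero and mark the block
 * group BTRFS_DC_CLEAR so the cache gets set up and written out again.
 */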
int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	bool locked = false;

	if (block_group) {
		struct btrfs_path *path = btrfs_alloc_path();

		if (!path) {
			ret = -ENOMEM;
			goto fail;
		}
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(trans, block_group, path);
			btrfs_put_block_group(block_group);
		}

		/*
		 * Now that we've truncated the cache away, it's no longer
		 * set up or written.
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
	}

	btrfs_i_size_write(BTRFS_I(inode), 0);
	truncate_pagecache(inode, 0);

	/*
	 * We skip the throttling logic for free space cache inodes, so we don't
	 * need to check for -EAGAIN.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

static void readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);
}

static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs and generation into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->fs_info = btrfs_sb(inode->i_sb);
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}
ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		clear_page(io_ctl->cur);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					   "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
				*gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

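/*
 * On-disk checksum layout: the first page starts with an array of one
 * crc32c per cache page, followed by the 64bit generation.  Each page's
 * crc covers the remainder of that page, so the crc of page 0 covers
 * everything after the crc array, including the generation.
 */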
static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->fs_info,
			"csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	copy_page(io_ctl->cur, bitmap);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	copy_page(entry->bitmap, io_ctl->cur);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}

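/*
 * Read one free space cache file into @ctl: validate the crc of the first
 * page and the generation stored in the header, then link every extent
 * entry and bitmap into the tree.  Returns 1 if the cache was loaded,
 * 0 if it is unusable and must be rebuilt, or a negative value on error.
 */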
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(fs_info,
			   "the free space cache file (%llu) is invalid, skip it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(fs_info,
			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
			  BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, 0);
	if (ret)
		return ret;

	readahead_cache(inode);

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kmem_cache_zalloc(
					btrfs_free_space_bitmap_cachep, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * The bitmaps are written out after all the extent entries, in the
	 * same order in which their entries were added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * We must pass a path with search_commit_root set to btrfs_iget in
	 * order to avoid a deadlock when allocating extents for the tree root.
	 *
	 * When we are COWing an extent buffer from the tree root, when looking
	 * for a free extent, at extent-tree.c:find_free_extent(), we can find
	 * block group without its free space cache loaded. When we find one
	 * we must load its space cache which requires reading its free space
	 * cache's inode item from the root tree. If this inode item is located
	 * in the same leaf that we started COWing before, then we end up in
	 * deadlock on the extent buffer (trying to read lock it when we
	 * previously write locked it).
	 *
	 * It's safe to read the inode item using the commit root because
	 * block groups, once loaded, stay in memory forever (until they are
	 * removed) as well as their space caches once loaded. New block groups
	 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
	 * we will never try to read their inode item while the fs is mounted.
	 */
	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

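/*
 * Write out every extent entry in @ctl, taking the block group's
 * allocation cluster and any ranges currently being trimmed into account
 * so no free space is lost across a transaction commit.  Bitmap entries
 * are only collected on @bitmap_list here; write_bitmap_entries() writes
 * their payload later.
 */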
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group_cache *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, 0, 0, NULL);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1, EXTENT_DELALLOC, 0,
					 0, NULL);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

static noinline_for_stack int write_pinned_extent_entries(
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = block_group->fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, 0, 0, NULL);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state);
}

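/*
 * Second half of a cache write: wait for the writeback started by
 * __btrfs_write_out_cache() to finish, then update the free space header
 * item so the cache file is seen as valid, and update the block group's
 * disk_cache_state to match the outcome.
 */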
static int __btrfs_wait_cache_io(struct btrfs_root *root,
				 struct btrfs_trans_handle *trans,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_io_ctl *io_ctl,
				 struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group) {
#ifdef DEBUG
			btrfs_err(root->fs_info,
				  "failed to write free space cache for block group %llu",
				  block_group->key.objectid);
#endif
		}
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.   Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;

}

static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
}

int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
				     block_group, &block_group->io_ctl,
				     path, block_group->key.objectid);
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @inode - the inode of the free space cache file
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @io_ctl - the io_ctl struct used to track the cache pages while under IO
 * @trans - the trans handle
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
	if (ret)
		goto out_unlock;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(block_group, io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
				i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state);

	/*
	 * At this point the pages are under IO and we're happy.  The caller
	 * is responsible for waiting on them and updating the cache and the
	 * inode.
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	if (must_iput)
		iput(inode);
	return ret;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state);

out_unlock:
	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}

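/*
 * Start writeback of a block group's free space cache file.  If this
 * returns 0 the caller must eventually call btrfs_wait_cache_io() to wait
 * for the IO and drop the inode reference; on error the block group is
 * marked BTRFS_DC_ERROR and the inode is released here.
 */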
int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
				block_group, &block_group->io_ctl, trans);
	if (ret) {
#ifdef DEBUG
		btrfs_err(fs_info,
			  "failed to write free space cache for block group %llu",
			  block_group->key.objectid);
#endif
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

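/*
 * Round an offset down to the start of the bitmap that would cover it.
 * For example, with a 4KiB unit each bitmap spans 128MiB, so any offset
 * within the first 128MiB of the space tracked by @ctl maps back to
 * ctl->start.
 */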
static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

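/*
 * Recompute the extent entry threshold so the cache for this block group
 * stays at or below MAX_CACHE_BYTES_PER_GIG (32K) of memory per 1GiB of
 * space: bitmaps are charged for first, and whatever budget remains
 * (capped at half of the total) is divided among extent entries.
 */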
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u64, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
	if (info->max_extent_size > ctl->unit)
		info->max_extent_size = 0;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

/*
 * If we cannot find a suitable extent, use *bytes to report back the size
 * of the largest contiguous free run that was found.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	/*
	 * Skip searching the bitmap if we don't have a contiguous section that
	 * is large enough for this allocation.
	 */
	if (for_alloc &&
	    bitmap_info->max_extent_size &&
	    bitmap_info->max_extent_size < *bytes) {
		*bytes = bitmap_info->max_extent_size;
		return -1;
	}

1761
	i = offset_to_bit(bitmap_info->offset, ctl->unit,
1762
			  max_t(u64, *offset, bitmap_info->offset));
1763
	bits = bytes_to_bits(*bytes, ctl->unit);
1764

1765
	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
1766 1767 1768 1769
		if (for_alloc && bits == 1) {
			found_bits = 1;
			break;
		}
1770 1771
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
1772 1773 1774
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
1775
			break;
1776 1777
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
1778 1779 1780 1781 1782
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	bitmap_info->max_extent_size = *bytes;
	return -1;
}

static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
{
	if (entry->bitmap)
		return entry->max_extent_size;
	return entry->bytes;
}

/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size, true);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
J
Josef Bacik 已提交
1853 1854 1855 1856
			} else {
				*max_extent_size =
					max(get_max_extent_size(entry),
					    *max_extent_size);
D
David Woodhouse 已提交
1857
			}
1858 1859 1860
			continue;
		}

D
David Woodhouse 已提交
1861 1862
		*offset = tmp;
		*bytes = entry->bytes - align_off;
1863 1864
		return entry;
	}
1865
out:
1866 1867 1868
	return NULL;
}

1869
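/*
 * A bitmap entry tracks one bit per ctl->unit bytes and covers
 * BITS_PER_BITMAP * ctl->unit bytes in total (128M with 4K pages and a 4K
 * sectorsize), always starting at an offset_to_bitmap() aligned boundary.
 */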
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

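/*
 * Clear [*offset, *offset + *bytes) from one or more bitmap entries.
 * Returns 0 once everything was removed, -EAGAIN when the remainder lives in
 * a following extent entry (the caller must retry from the top), or -EINVAL
 * when the bitmaps don't actually contain the space being removed.
 */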
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *bitmap_info,
			      u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and
	 * then go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
			    false);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes, false);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}

static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	/*
	 * We set some bytes, we have no idea what the max extent size is
	 * anymore.
	 */
	info->max_extent_size = 0;

	return bytes_to_set;
}

static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		      struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	bool forced = false;

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group))
		forced = true;
#endif

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (!forced && ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= fs_info->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries.  However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory limit,
	 * so still allow those block groups to have a bitmap entry.
	 */
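	/*
	 * Example with 4K pages and a 4K sectorsize: a bitmap spans 128M, so
	 * a block group smaller than 64M (half a bitmap's coverage) never
	 * gets bitmap entries.
	 */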
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
}

static const struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

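/*
 * Route all of 'info' into bitmap entries if the use_bitmap() heuristic
 * accepts it.  Returns 1 when the bytes were absorbed and 'info' was freed,
 * 0 when the caller should link 'info' as a regular extent entry instead,
 * or -ENOMEM on allocation failure.
 */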
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
						 GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kmem_cache_free(btrfs_free_space_bitmap_cachep,
					info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

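/*
 * Merge 'info' with any byte-adjacent extent entries on either side.  The
 * update_stat flag picks between unlink_free_space(), which also adjusts the
 * ctl accounting, and the raw __unlink_free_space() variant.
 */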
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

/*
 * We prefer always to allocate from extent entries, both for clustered and
 * non-clustered allocation requests. So when attempting to add a new extent
 * entry, try to see if there's adjacent free space in bitmap entries, and if
 * there is, migrate that space from the bitmaps to the extent.
 * Like this we get better chances of satisfying space allocation requests
 * because we attempt to satisfy them based on a single cache entry, and never
 * on 2 or more entries - even if the entries represent a contiguous free space
 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
 * ends).
 */
static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info,
			      bool update_stat)
{
	/*
	 * Only work with disconnected entries, as we can change their offset,
	 * and must be extent entries.
	 */
	ASSERT(!info->bitmap);
	ASSERT(RB_EMPTY_NODE(&info->offset_index));

	if (ctl->total_bitmaps > 0) {
		bool stole_end;
		bool stole_front = false;

		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
		if (ctl->total_bitmaps > 0)
			stole_front = steal_from_bitmap_to_front(ctl, info,
								 update_stat);

		if (stole_end || stole_front)
			try_merge_free_space(ctl, info, update_stat);
	}
}

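/*
 * Add a free range by first trying to merge it into neighbouring extent
 * entries, then into an existing bitmap, and only then linking a brand new
 * extent entry, stealing back any adjacent bitmap space first.
 */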
int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
			   struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;
	RB_CLEAR_NODE(&info->offset_index);

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent, so we know we're going to have to allocate a new extent;
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	/*
	 * Only steal free space from adjacent bitmaps if we're sure we're not
	 * going to add the new free space to existing bitmap entries - because
	 * that would mean unnecessary work that would be reverted. Therefore
	 * attempt to steal space from bitmaps if we're adding an extent entry.
	 */
	steal_from_bitmap(ctl, info, true);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		btrfs_crit(fs_info, "unable to add free space :%d", ret);
		ASSERT(ret != -EEXIST);
	}

	return ret;
}

int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
			 u64 bytenr, u64 size)
{
	return __btrfs_add_free_space(block_group->fs_info,
				      block_group->free_space_ctl,
				      bytenr, size);
}

int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	int ret;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

again:
	ret = 0;
	if (!bytes)
		goto out_lock;

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
			goto out_lock;
		}
	}

	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes) {
				ret = link_free_space(ctl, info);
				WARN_ON(ret);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, info);
			}

			offset += to_free;
			bytes -= to_free;
			goto again;
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
				goto again;
			} else if (old_end == offset + bytes) {
				/* all done */
				goto out_lock;
			}
			spin_unlock(&ctl->tree_lock);

			ret = btrfs_add_free_space(block_group, offset + bytes,
						   old_end - (offset + bytes));
			WARN_ON(ret);
			goto out;
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
		goto again;
	}
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
			   info->offset, info->bytes,
			   (info->bitmap) ? "yes" : "no");
	}
	spin_unlock(&ctl->tree_lock);
	btrfs_info(fs_info, "block group has cluster?: %s",
		   list_empty(&block_group->cluster_list) ? "no" : "yes");
	btrfs_info(fs_info,
		   "%d blocks of free space at or bigger than bytes is", count);
}

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = fs_info->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		RB_CLEAR_NODE(&entry->offset_index);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap) {
			try_merge_free_space(ctl, entry, false);
			steal_from_bitmap(ctl, entry, false);
		}
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}

static void __btrfs_remove_free_space_cache_locked(
				struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}

		cond_resched_lock(&ctl->tree_lock);
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);

		cond_resched_lock(&ctl->tree_lock);
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

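/*
 * Find space for an allocation: search for at least bytes + empty_size of
 * contiguous space, but only take 'bytes' out of the tree.  Returns the
 * start offset on success or 0 on failure, updating *max_extent_size so the
 * caller can size its next attempt.  Any alignment gap carved off the front
 * of an extent entry is handed back to the free space cache.
 */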
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;
	u64 align_gap = 0;
	u64 align_gap_len = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
				block_group->full_stripe_len, max_extent_size);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;

		entry->offset = offset + bytes;
		WARN_ON(entry->bytes < bytes + align_gap_len);

		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	if (align_gap_len)
		__btrfs_add_free_space(block_group->fs_info, ctl,
				       align_gap, align_gap_len);
	return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}

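/*
 * Carve 'bytes' out of a bitmap entry that already belongs to the cluster.
 * Returns the start offset, or 0 if the bitmap has no large enough run at or
 * after min_start.
 */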
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
	if (err) {
		*max_extent_size = max(get_max_extent_size(entry),
				       *max_extent_size);
		return 0;
	}

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes)
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);

		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start,
						      max_extent_size);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kmem_cache_free(btrfs_free_space_bitmap_cachep,
					entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}

static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long max_bits = 0;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;

	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

	/*
	 * Don't bother looking for a cluster in this bitmap if it's heavily
	 * fragmented.
	 */
	if (entry->max_extent_size &&
	    entry->max_extent_size < cont1_bytes)
		return -ENOSPC;
again:
	found_bits = 0;
	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			if (found_bits > max_bits)
				max_bits = found_bits;
			break;
		}
		if (next_zero - i > max_bits)
			max_bits = next_zero - i;
		i = next_zero;
	}

	if (!found_bits) {
		entry->max_extent_size = (u64)max_bits * ctl->unit;
		return -ENOSPC;
	}

	if (!total_found) {
		start = i;
		cluster->max_size = 0;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * ctl->unit + entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}

/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}

/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	if (!list_empty(bitmaps))
		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);

	if (!entry || entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}

/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocations with smaller extents.  For
	 * data, keep it dense.
	 */
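	/*
	 * e.g. a 512K data allocation with empty_size == 3.5M requires one
	 * 1M contiguous extent ((bytes + empty_size) >> 2) and fills the
	 * rest of the window from extents of at least one sectorsize.
	 */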
	if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = fs_info->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = fs_info->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	cluster->fragmented = false;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}

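/*
 * Discard one contiguous range.  The reserved_start/reserved_bytes range is
 * accounted as reserved in the block group while the discard is in flight,
 * and is re-added to the free space cache afterwards under
 * cache_writeout_mutex.
 */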
static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes,
		       struct btrfs_trim_range *trim_entry)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	mutex_lock(&ctl->cache_writeout_mutex);
	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
	list_del(&trim_entry->list);
	mutex_unlock(&ctl->cache_writeout_mutex);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);
	}

	return ret;
}

static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = extent_start;
		trim_entry.bytes = extent_bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes, &trim_entry);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}

static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = start;
		trim_entry.bytes = bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes, &trim_entry);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}

void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->trimming);
}

void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
		   block_group->removed);
	spin_unlock(&block_group->lock);

	if (cleanup) {
		mutex_lock(&fs_info->chunk_mutex);
		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		mutex_unlock(&fs_info->chunk_mutex);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We've left one free space entry and other tasks trimming
		 * this block group have left 1 entry each one. Free them.
		 */
		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
	}
}

int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_get_block_group_trimming(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
out:
	btrfs_put_block_group_trimming(block_group);
	return ret;
}

/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count, true);
		/* Logic error; Should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}

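/*
 * The free-ino cache below reuses the same free space cache machinery; its
 * backing inode lives at offset 0 under BTRFS_FREE_INO_OBJECTID rather than
 * at a block group's offset.
 */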
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_inode)
		inode = igrab(root->ino_cache_inode);
	spin_unlock(&root->ino_cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->ino_cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->ino_cache_inode = igrab(inode);
	spin_unlock(&root->ino_cache_lock);

	return inode;
}

int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}

int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path,
			      struct inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	int ret;
	struct btrfs_io_ctl io_ctl;
	bool release_metadata = true;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	memset(&io_ctl, 0, sizeof(io_ctl));
	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, trans);
	if (!ret) {
		/*
		 * At this point writepages() didn't error out, so our metadata
		 * reservation is released when the writeback finishes, at
		 * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
		 * with or without an error.
		 */
		release_metadata = false;
		ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
	}

	if (ret) {
		if (release_metadata)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					inode->i_size, true);
#ifdef DEBUG
		btrfs_err(fs_info,
			  "failed to write free ino cache for root %llu",
			  root->root_key.objectid);
#endif
	}

	return ret;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically, it
 * doesn't do any of the merging that add_free_space does, this acts a lot like
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		info->max_extent_size = 0;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);

	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	if (map)
		kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
	return 0;
}

/*
 * Checks to see if the given range is in the free space cache.  This is really
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group_cache *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */