// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#include "volumes.h"
#include "space-info.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "discard.h"

#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_64K
#define FORCE_EXTENT_THRESHOLD	SZ_1M

struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int count_bitmap_extents(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *bitmap_info);
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_io_ctl *io_ctl,
			     struct btrfs_path *path);

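/*
 * Look up the inode that backs a block group's free space cache: read the
 * free space header item keyed by the block group offset, then iget the
 * inode it points at.
 */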
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	unsigned nofs_flag;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	/*
	 * We are often under a trans handle at this point, so we need to make
	 * sure NOFS is set to keep us from deadlocking.
	 */
	nofs_flag = memalloc_nofs_save();
	inode = btrfs_iget_path(fs_info->sb, &location, root, path);
	btrfs_release_path(path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(inode))
		return inode;

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
		struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(fs_info->tree_root, path,
					  block_group->start);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(fs_info, "Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memzero_extent_buffer(leaf, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_trans_handle *trans,
			    struct btrfs_block_group *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(trans->fs_info->tree_root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
					 ino, block_group->start);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group *block_group,
				    struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	bool locked = false;

	if (block_group) {
		struct btrfs_path *path = btrfs_alloc_path();

		if (!path) {
			ret = -ENOMEM;
			goto fail;
		}
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(trans, block_group, path);
			btrfs_put_block_group(block_group);
		}

		/*
		 * Now that we've truncated the cache away, it's no longer
		 * set up or written.
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
	}

	btrfs_i_size_write(BTRFS_I(inode), 0);
	truncate_pagecache(inode, 0);

	/*
	 * We skip the throttling logic for free space cache inodes, so we don't
	 * need to check for -EAGAIN.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

static void readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);
}

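/*
 * The io_ctl helpers below stream the cache file page by page.  As implied
 * by io_ctl_set_generation() and io_ctl_set_crc() further down, the first
 * page starts with one u32 crc per page followed by a u64 generation; the
 * rest of each page is packed with btrfs_free_space_entry records, and each
 * bitmap occupies a full page of its own.
 */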
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs and generation into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->fs_info = btrfs_sb(inode->i_sb);
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}
ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		clear_page(io_ctl->cur);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}

374
static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
391 392 393 394 395 396
			if (page->mapping != inode->i_mapping) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "free space cache page truncated");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
397
			if (!PageUptodate(page)) {
398 399
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					   "error reading free space cache");
400 401 402 403 404 405
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

406 407 408 409 410
	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

411 412 413
	return 0;
}

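/*
 * Stamp (and, on load, verify) the transaction generation right after the
 * crc area at the front of the first page, so a cache file left over from
 * an older transaction can be detected and ignored.
 */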
static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
				*gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->fs_info,
			"csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	copy_page(io_ctl->cur, bitmap);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	copy_page(entry->bitmap, io_ctl->cur);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}

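/*
 * Read one cache file into the given free space ctl: validate the crc and
 * generation, link in all the extent entries, then fill in the bitmaps that
 * were queued up while walking the entries.  Returns 1 if the cache was
 * loaded and linked in, and <= 0 if it could not be used, in which case the
 * caller falls back to rebuilding the free space from the extent tree.
 */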
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(fs_info,
			   "the free space cache file (%llu) is invalid, skip it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(fs_info,
			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
			  BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, 0);
	if (ret)
		return ret;

	readahead_cache(inode);

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		/*
		 * Sync discard ensures that the free space cache is always
		 * trimmed.  So when reading this in, the state should reflect
		 * that.  We also do this for async as a stop gap for lack of
		 * persistence.
		 */
		if (btrfs_test_opt(fs_info, DISCARD_SYNC) ||
		    btrfs_test_opt(fs_info, DISCARD_ASYNC))
			e->trim_state = BTRFS_TRIM_STATE_TRIMMED;

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kmem_cache_zalloc(
					btrfs_free_space_bitmap_cachep, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
		e->bitmap_extents = count_bitmap_extents(ctl, e);
		if (!btrfs_free_space_trimmed(e)) {
			ctl->discardable_extents[BTRFS_STAT_CURR] +=
				e->bitmap_extents;
			ctl->discardable_bytes[BTRFS_STAT_CURR] += e->bytes;
		}
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	btrfs_discard_update_discardable(ctl->private, ctl);
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = block_group->used;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * We must pass a path with search_commit_root set to btrfs_iget in
	 * order to avoid a deadlock when allocating extents for the tree root.
	 *
	 * When we are COWing an extent buffer from the tree root, when looking
	 * for a free extent, at extent-tree.c:find_free_extent(), we can find
	 * a block group without its free space cache loaded. When we find one
	 * we must load its space cache which requires reading its free space
	 * cache's inode item from the root tree. If this inode item is located
	 * in the same leaf that we started COWing before, then we end up in
	 * deadlock on the extent buffer (trying to read lock it when we
	 * previously write locked it).
	 *
	 * It's safe to read the inode item using the commit root because
	 * block groups, once loaded, stay in memory forever (until they are
	 * removed) as well as their space caches once loaded. New block groups
	 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
	 * we will never try to read their inode item while the fs is mounted.
	 */
	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->start);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->length - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->start);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->start);
	}

	iput(inode);
	return ret;
}

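/*
 * Write out every entry in the free space rbtree (plus the entries of the
 * block group's cluster, if any), queueing bitmap entries on @bitmap_list
 * so they can be written after all the extent entries.
 */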
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, 0, 0, NULL);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1, EXTENT_DELALLOC, 0,
					 0, NULL);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

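/*
 * Pinned extents are freed space that only becomes usable once the current
 * transaction commits, so write them out as free space too; dropping them
 * from the cache would leak the space after a crash or unclean unmount.
 */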
static noinline_for_stack int write_pinned_extent_entries(
			    struct btrfs_block_group *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = block_group->fs_info->pinned_extents;

	start = block_group->start;

	while (start < block_group->start + block_group->length) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->start + block_group->length)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->start + block_group->length,
				 extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, 0, 0, NULL);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state);
}

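/*
 * Wait for the cache file's IO to finish, then update the free space header
 * item so the cache is marked valid for the next mount.  On failure the
 * inode's generation is zeroed and the pages are invalidated, so the
 * half-written cache will be ignored on load.
 */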
static int __btrfs_wait_cache_io(struct btrfs_root *root,
				 struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *block_group,
				 struct btrfs_io_ctl *io_ctl,
				 struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group) {
#ifdef DEBUG
			btrfs_err(root->fs_info,
				  "failed to write free space cache for block group %llu",
				  block_group->start);
#endif
		}
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.   Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;

}

static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
}

int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
			struct btrfs_block_group *block_group,
			struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
				     block_group, &block_group->io_ctl,
				     path, block_group->start);
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @inode - the inode the free space cache is stored in
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @io_ctl - the io_ctl struct used to track the in-flight IO
 * @trans - the trans handle
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
	if (ret)
		goto out_unlock;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(block_group, io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
				i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state);

	/*
	 * at this point the pages are under IO and we're happy.
	 * The caller is responsible for waiting on them and updating
	 * the cache and the inode
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	if (must_iput)
		iput(inode);
	return ret;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state);

out_unlock:
	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}

int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
			  struct btrfs_block_group *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
				block_group, &block_group->io_ctl, trans);
	if (ret) {
#ifdef DEBUG
		btrfs_err(fs_info,
			  "failed to write free space cache for block group %llu",
			  block_group->start);
#endif
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}

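/*
 * Bitmap entries track free space in ctl->unit sized chunks, one bit per
 * unit.  For example, with 4KiB pages and unit == 4096, one bitmap covers
 * BITS_PER_BITMAP * unit = 32768 * 4KiB = 128MiB of the block group.
 */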
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;

	if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
		ctl->discardable_extents[BTRFS_STAT_CURR]--;
		ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes;
	}
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
		ctl->discardable_extents[BTRFS_STAT_CURR]++;
		ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
	}

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

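/*
 * Recompute how many extent entries we allow before new free space is folded
 * into bitmaps.  As a rough worked example with 4KiB pages: a 1GiB block
 * group is capped at 64KiB of cache memory, so after subtracting the memory
 * consumed by the existing bitmaps, at most half of the cap is left for
 * struct btrfs_free_space extent entries.
 */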
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->length;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u64, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * We are trying to keep the total amount of memory used per 1GiB of
	 * space to be MAX_CACHE_BYTES_PER_GIG.  However, with a reclamation
	 * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of
	 * bitmaps, we may end up using more memory than this.
	 */
	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);

	bitmap_bytes = ctl->total_bitmaps * ctl->unit;

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}

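/*
 * Clearing or setting a run of bits changes the number of discrete free
 * regions in the bitmap, so bitmap_extents is adjusted by looking at the
 * bits bordering the run: clearing the middle of a region splits it in two
 * (+1), while clearing an entire isolated region removes it (-1), and
 * setting works symmetrically.
 */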
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count, end;
	int extent_delta = -1;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	end = start + count;
	ASSERT(end <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
	if (info->max_extent_size > ctl->unit)
		info->max_extent_size = 0;

	if (start && test_bit(start - 1, info->bitmap))
		extent_delta++;

	if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
		extent_delta++;

	info->bitmap_extents += extent_delta;
	if (!btrfs_free_space_trimmed(info)) {
		ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
		ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
	}
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count, end;
	int extent_delta = 1;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	end = start + count;
	ASSERT(end <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;

	if (start && test_bit(start - 1, info->bitmap))
		extent_delta--;

	if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
		extent_delta--;

	info->bitmap_extents += extent_delta;
	if (!btrfs_free_space_trimmed(info)) {
		ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
		ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes;
	}
}

/*
 * If we can not find a suitable extent, we use *bytes to record the size of
 * the largest free extent we did find.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	/*
	 * Skip searching the bitmap if we don't have a contiguous section that
	 * is large enough for this allocation.
	 */
	if (for_alloc &&
	    bitmap_info->max_extent_size &&
	    bitmap_info->max_extent_size < *bytes) {
		*bytes = bitmap_info->max_extent_size;
		return -1;
	}

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		if (for_alloc && bits == 1) {
			found_bits = 1;
			break;
		}
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	bitmap_info->max_extent_size = *bytes;
	return -1;
}

static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
{
	if (entry->bitmap)
		return entry->max_extent_size;
	return entry->bytes;
}

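/*
 * Walk the offset-sorted tree for the first entry that can satisfy *bytes
 * at the requested alignment.  The rounding inside is plain integer math:
 * with ctl->start = 1M, entry->offset = 1M + 100K and align = 64K (numbers
 * made up for illustration), tmp = (100K + 64K - 1) / 64K = 2 units, i.e.
 * an aligned offset of 1M + 128K, leaving align_off = 28K of slack at the
 * front of the entry.
 */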
/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size, true);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else {
				*max_extent_size =
					max(get_max_extent_size(entry),
					    *max_extent_size);
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}

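/*
 * Count the runs of set bits in a bitmap entry.  bitmap_for_each_set_region()
 * reports each region as [rs, re), so a region spans (re - rs) bits.
 */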
static int count_bitmap_extents(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *bitmap_info)
{
	struct btrfs_block_group *block_group = ctl->private;
	u64 bytes = bitmap_info->bytes;
	unsigned int rs, re;
	int count = 0;

	if (!block_group || !bytes)
		return count;

	bitmap_for_each_set_region(bitmap_info->bitmap, rs, re, 0,
				   BITS_PER_BITMAP) {
		bytes -= (re - rs) * ctl->unit;
		count++;

		if (!bytes)
			break;
	}

	return count;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	info->bitmap_extents = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *bitmap_info,
			      u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and
	 * then go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
			    false);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes, false);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}

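/*
 * Add as much of @bytes as fits into this bitmap, starting at @offset.
 * Returns the number of bytes actually set; the caller loops over the
 * following bitmaps with the remainder.
 */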
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes, enum btrfs_trim_state trim_state)
{
	u64 bytes_to_set = 0;
	u64 end;

	/*
	 * This is a tradeoff to make bitmap trim state minimal.  We mark the
	 * whole bitmap untrimmed if at any point we add untrimmed regions.
	 */
	if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) {
		if (btrfs_free_space_trimmed(info)) {
			ctl->discardable_extents[BTRFS_STAT_CURR] +=
				info->bitmap_extents;
			ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
		}
		info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
	}

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	/*
	 * We set some bytes, we have no idea what the max extent size is
	 * anymore.
	 */
	info->max_extent_size = 0;

	return bytes_to_set;
}

static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		      struct btrfs_free_space *info)
{
	struct btrfs_block_group *block_group = ctl->private;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	bool forced = false;

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group))
		forced = true;
#endif

	/* This is a way to reclaim large regions from the bitmaps. */
	if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD)
		return false;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (!forced && ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= fs_info->sectorsize * 8) {
			if (ctl->free_extents * 3 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries.  However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory limit,
	 * so allow those block groups to still have a bitmap entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
		return false;

	return true;
}

static const struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

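/*
 * Try to fold @info into one or more bitmaps.  Returns 0 when the entry
 * should stay a plain extent, 1 when all of its bytes were absorbed into
 * bitmaps, or a negative errno if allocating a new bitmap failed.  Called
 * with ctl->tree_lock held, which may be dropped and re-taken around the
 * allocations.
 */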
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	enum btrfs_trim_state trim_state;
	int ret;

	bytes = info->bytes;
	offset = info->offset;
	trim_state = info->trim_state;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry, offset,
							  bytes, trim_state);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
					  trim_state);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
						 GFP_NOFS);
		info->trim_state = BTRFS_TRIM_STATE_TRIMMED;
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kmem_cache_free(btrfs_free_space_bitmap_cachep,
					info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

/*
 * Free space merging rules:
 *  1) Merge trimmed areas together
 *  2) Let untrimmed areas coalesce with trimmed areas
 *  3) Always pull neighboring regions from bitmaps
 *
 * The above rules are for when we merge free space based on btrfs_trim_state.
 * Rules 2 and 3 are subtle because they are suboptimal, but are done for the
 * same reason: to promote larger extent regions which makes life easier for
 * find_free_extent().  Rule 2 enables coalescing based on the common path
 * being returning free space from btrfs_finish_extent_commit().  So when free
 * space is trimmed, it will prevent aggregating a newly trimmed region with
 * untrimmed regions in the rb_tree.  Rule 3 is purely to obtain larger extents
 * and provide find_free_extent() with the largest extents possible hoping for
 * the reuse path.
 */
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;
	const bool is_trimmed = btrfs_free_space_trimmed(info);

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	/* See try_merge_free_space() comment. */
	if (right_info && !right_info->bitmap &&
	    (!is_trimmed || btrfs_free_space_trimmed(right_info))) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	/* See try_merge_free_space() comment. */
	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset &&
	    (!is_trimmed || btrfs_free_space_trimmed(left_info))) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

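/*
 * Grow @info to the right by stealing any free bits recorded in the bitmap
 * covering the range just past the end of the extent entry.  Returns true
 * if any bytes were migrated out of the bitmap.
 */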
static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	/* See try_merge_free_space() comment. */
	if (!btrfs_free_space_trimmed(bitmap))
		info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	/* See try_merge_free_space() comment. */
	if (!btrfs_free_space_trimmed(bitmap))
		info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

/*
 * We prefer always to allocate from extent entries, both for clustered and
 * non-clustered allocation requests. So when attempting to add a new extent
 * entry, try to see if there's adjacent free space in bitmap entries, and if
 * there is, migrate that space from the bitmaps to the extent.
 * Like this we get better chances of satisfying space allocation requests
 * because we attempt to satisfy them based on a single cache entry, and never
 * on 2 or more entries - even if the entries represent a contiguous free space
 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
 * ends).
 */
static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info,
			      bool update_stat)
{
	/*
	 * Only work with disconnected entries, as we can change their offset,
	 * and must be extent entries.
	 */
	ASSERT(!info->bitmap);
	ASSERT(RB_EMPTY_NODE(&info->offset_index));

	if (ctl->total_bitmaps > 0) {
		bool stole_end;
		bool stole_front = false;

		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
		if (ctl->total_bitmaps > 0)
			stole_front = steal_from_bitmap_to_front(ctl, info,
								 update_stat);

		if (stole_end || stole_front)
			try_merge_free_space(ctl, info, update_stat);
	}
}

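/*
 * Common entry point for returning free space to the cache: merge with
 * adjacent extents, fall back to a bitmap once the extent threshold is
 * exceeded, steal back neighbouring bitmap space for extent entries, and
 * queue async discard work for any untrimmed space.
 */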
int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
			   struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes,
			   enum btrfs_trim_state trim_state)
{
	struct btrfs_block_group *block_group = ctl->private;
	struct btrfs_free_space *info;
	int ret = 0;
	u64 filter_bytes = bytes;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;
	info->trim_state = trim_state;
	RB_CLEAR_NODE(&info->offset_index);

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent, so we know we're going to have to allocate a new extent;
	 * before we do that, see if we need to drop this into a bitmap.
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	/*
	 * Only steal free space from adjacent bitmaps if we're sure we're not
	 * going to add the new free space to existing bitmap entries - because
	 * that would mean unnecessary work that would be reverted. Therefore
	 * attempt to steal space from bitmaps if we're adding an extent entry.
	 */
	steal_from_bitmap(ctl, info, true);

	filter_bytes = max(filter_bytes, info->bytes);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	btrfs_discard_update_discardable(block_group, ctl);
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		btrfs_crit(fs_info, "unable to add free space :%d", ret);
		ASSERT(ret != -EEXIST);
	}

	if (trim_state != BTRFS_TRIM_STATE_TRIMMED) {
		btrfs_discard_check_filter(block_group, filter_bytes);
		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
	}

	return ret;
}

int btrfs_add_free_space(struct btrfs_block_group *block_group,
			 u64 bytenr, u64 size)
{
	enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;

	if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC))
		trim_state = BTRFS_TRIM_STATE_TRIMMED;

	return __btrfs_add_free_space(block_group->fs_info,
				      block_group->free_space_ctl,
				      bytenr, size, trim_state);
}

/*
 * This is a subtle distinction because when adding free space back in general,
 * we want it to be added as untrimmed for async. But in the case where we add
 * it on loading of a block group, we want to consider it trimmed.
 */
int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
				       u64 bytenr, u64 size)
{
	enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;

	if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) ||
	    btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		trim_state = BTRFS_TRIM_STATE_TRIMMED;

	return __btrfs_add_free_space(block_group->fs_info,
				      block_group->free_space_ctl,
				      bytenr, size, trim_state);
}

int btrfs_remove_free_space(struct btrfs_block_group *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	int ret;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

again:
	ret = 0;
	if (!bytes)
		goto out_lock;

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
			goto out_lock;
		}
	}

	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes) {
				ret = link_free_space(ctl, info);
				WARN_ON(ret);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, info);
			}

			offset += to_free;
			bytes -= to_free;
			goto again;
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
				goto again;
			} else if (old_end == offset + bytes) {
				/* all done */
				goto out_lock;
			}
			spin_unlock(&ctl->tree_lock);

			ret = __btrfs_add_free_space(block_group->fs_info, ctl,
						     offset + bytes,
						     old_end - (offset + bytes),
						     info->trim_state);
			WARN_ON(ret);
			goto out;
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
		goto again;
	}
out_lock:
	btrfs_discard_update_discardable(block_group, ctl);
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group *block_group,
			   u64 bytes)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
			   info->offset, info->bytes,
			   (info->bitmap) ? "yes" : "no");
	}
	spin_unlock(&ctl->tree_lock);
	btrfs_info(fs_info, "block group has cluster?: %s",
		   list_empty(&block_group->cluster_list) ? "no" : "yes");
	btrfs_info(fs_info,
		   "%d blocks of free space at or bigger than bytes is", count);
}

void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = fs_info->sectorsize;
	ctl->start = block_group->start;
	ctl->private = block_group;
	ctl->op = &free_space_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		RB_CLEAR_NODE(&entry->offset_index);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap) {
			/* Merging treats extents as if they were new */
			if (!btrfs_free_space_trimmed(entry)) {
				ctl->discardable_extents[BTRFS_STAT_CURR]--;
				ctl->discardable_bytes[BTRFS_STAT_CURR] -=
					entry->bytes;
			}

			try_merge_free_space(ctl, entry, false);
			steal_from_bitmap(ctl, entry, false);

			/* As we insert directly, update these statistics */
			if (!btrfs_free_space_trimmed(entry)) {
				ctl->discardable_extents[BTRFS_STAT_CURR]++;
				ctl->discardable_bytes[BTRFS_STAT_CURR] +=
					entry->bytes;
			}
		}
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}

static void __btrfs_remove_free_space_cache_locked(
				struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}

		cond_resched_lock(&ctl->tree_lock);
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);

		cond_resched_lock(&ctl->tree_lock);
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	btrfs_discard_update_discardable(block_group, ctl);
	spin_unlock(&ctl->tree_lock);
}

/**
 * btrfs_is_free_space_trimmed - see if everything is trimmed
 * @block_group: block_group of interest
 *
 * Walk @block_group's free space rb_tree to determine if everything is trimmed.
 */
bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *node;
	bool ret = true;

	spin_lock(&ctl->tree_lock);
	node = rb_first(&ctl->free_space_offset);

	while (node) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);

		if (!btrfs_free_space_trimmed(info)) {
			ret = false;
			break;
		}

		node = rb_next(node);
	}

	spin_unlock(&ctl->tree_lock);
	return ret;
}

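/*
 * Carve @bytes (plus @empty_size of slack) out of the free space cache,
 * aligned to the block group's full stripe length.  Any slack skipped at
 * the front of the chosen entry for alignment is immediately re-added as
 * free space, so nothing is lost.
 */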
u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;
	u64 align_gap = 0;
	u64 align_gap_len = 0;
	enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
				block_group->full_stripe_len, max_extent_size);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);

		if (!btrfs_free_space_trimmed(entry))
			atomic64_add(bytes, &discard_ctl->discard_bytes_saved);

		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;
		align_gap_trim_state = entry->trim_state;

		if (!btrfs_free_space_trimmed(entry))
			atomic64_add(bytes, &discard_ctl->discard_bytes_saved);

		entry->offset = offset + bytes;
		WARN_ON(entry->bytes < bytes + align_gap_len);

		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}
out:
	btrfs_discard_update_discardable(block_group, ctl);
	spin_unlock(&ctl->tree_lock);

	if (align_gap_len)
		__btrfs_add_free_space(block_group->fs_info, ctl,
				       align_gap, align_gap_len,
				       align_gap_trim_state);
	return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}

static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
	if (err) {
		*max_extent_size = max(get_max_extent_size(entry),
				       *max_extent_size);
		return 0;
	}

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes)
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);

		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start,
						      max_extent_size);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	if (!btrfs_free_space_trimmed(entry))
		atomic64_add(bytes, &discard_ctl->discard_bytes_saved);

	ctl->free_space -= bytes;
	if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
		ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kmem_cache_free(btrfs_free_space_bitmap_cachep,
					entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		} else if (!btrfs_free_space_trimmed(entry)) {
			ctl->discardable_extents[BTRFS_STAT_CURR]--;
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}

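/*
 * Try to build a cluster window out of a single bitmap entry: at least
 * @bytes in total, with one contiguous chunk of at least @cont1_bytes and
 * no chunk smaller than @min_bytes.
 */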
static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long max_bits = 0;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;

	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

	/*
	 * Don't bother looking for a cluster in this bitmap if it's heavily
	 * fragmented.
	 */
	if (entry->max_extent_size &&
	    entry->max_extent_size < cont1_bytes)
		return -ENOSPC;
again:
	found_bits = 0;
	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			if (found_bits > max_bits)
				max_bits = found_bits;
			break;
		}
		if (next_zero - i > max_bits)
			max_bits = next_zero - i;
		i = next_zero;
	}

	if (!found_bits) {
		entry->max_extent_size = (u64)max_bits * ctl->unit;
		return -ENOSPC;
	}

	if (!total_found) {
		start = i;
		cluster->max_size = 0;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * ctl->unit + entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}

/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}

/*
 * This specifically looks for bitmaps that may work in the cluster; we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	if (!list_empty(bitmaps))
		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);

	if (!entry || entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}

/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocations with smaller extents.  For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = fs_info->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = fs_info->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	cluster->fragmented = false;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}

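/*
 * Discard one reserved range.  The caller already carved the reservation
 * out of the free space cache, so the discard itself runs without the
 * tree lock; afterwards the leftover front and tail pieces are re-added
 * with their old trim state, the discarded middle is re-added as trimmed
 * on success, and the temporary space reservation is dropped again.
 */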
static int do_trimming(struct btrfs_block_group *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes,
		       enum btrfs_trim_state reserved_trim_state,
		       struct btrfs_trim_range *trim_entry)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	int update = 0;
	const u64 end = start + bytes;
	const u64 reserved_end = reserved_start + reserved_bytes;
	enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
	if (!ret) {
		*total_trimmed += trimmed;
		trim_state = BTRFS_TRIM_STATE_TRIMMED;
	}

	mutex_lock(&ctl->cache_writeout_mutex);
	if (reserved_start < start)
		__btrfs_add_free_space(fs_info, ctl, reserved_start,
				       start - reserved_start,
				       reserved_trim_state);
	if (start + bytes < reserved_start + reserved_bytes)
		__btrfs_add_free_space(fs_info, ctl, end, reserved_end - end,
				       reserved_trim_state);
	__btrfs_add_free_space(fs_info, ctl, start, bytes, trim_state);
	list_del(&trim_entry->list);
	mutex_unlock(&ctl->cache_writeout_mutex);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);
	}

	return ret;
}

/*
 * If @async is set, then we will trim 1 region and return.
 */
static int trim_no_bitmap(struct btrfs_block_group *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen,
			  bool async)
{
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	enum btrfs_trim_state extent_trim_state;
	u64 bytes;
	const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);

	while (start < end) {
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen)
			goto out_unlock;

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			goto out_unlock;

		/* Skip bitmaps and if async, already trimmed entries */
		while (entry->bitmap ||
		       (async && btrfs_free_space_trimmed(entry))) {
			node = rb_next(&entry->offset_index);
			if (!node)
				goto out_unlock;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end)
			goto out_unlock;

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		extent_trim_state = entry->trim_state;
		if (async) {
			start = entry->offset;
			bytes = entry->bytes;
			if (bytes < minlen) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto next;
			}
			unlink_free_space(ctl, entry);
			/*
			 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
			 * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim
			 * X when we come back around.  So trim it now.
			 */
			if (max_discard_size &&
			    bytes >= (max_discard_size +
				      BTRFS_ASYNC_DISCARD_MIN_FILTER)) {
				bytes = max_discard_size;
				extent_bytes = max_discard_size;
				entry->offset += max_discard_size;
				entry->bytes -= max_discard_size;
				link_free_space(ctl, entry);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, entry);
			}
		} else {
			start = max(start, extent_start);
			bytes = min(extent_start + extent_bytes, end) - start;
			if (bytes < minlen) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto next;
			}

			unlink_free_space(ctl, entry);
			kmem_cache_free(btrfs_free_space_cachep, entry);
		}

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = extent_start;
		trim_entry.bytes = extent_bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes, extent_trim_state,
				  &trim_entry);
		if (ret) {
			block_group->discard_cursor = start + bytes;
			break;
		}
next:
		start += bytes;
		block_group->discard_cursor = start;
		if (async && *total_trimmed)
			break;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;

out_unlock:
	block_group->discard_cursor = btrfs_block_group_end(block_group);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

	return ret;
}

/*
 * If we break out of trimming a bitmap prematurely, we should reset the
 * trimming bit.  In a rather contrived case, it's possible to race here so
 * reset the state to BTRFS_TRIM_STATE_UNTRIMMED.
 *
 * start = start of bitmap
 * end = near end of bitmap
 *
 * Thread 1:			Thread 2:
 * trim_bitmaps(start)
 *				trim_bitmaps(end)
 *				end_trimming_bitmap()
 * reset_trimming_bitmap()
 */
static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset)
{
	struct btrfs_free_space *entry;

	spin_lock(&ctl->tree_lock);
	entry = tree_search_offset(ctl, offset, 1, 0);
	if (entry) {
		if (btrfs_free_space_trimmed(entry)) {
			ctl->discardable_extents[BTRFS_STAT_CURR] +=
				entry->bitmap_extents;
			ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes;
		}
		entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
	}

	spin_unlock(&ctl->tree_lock);
}
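
/*
 * Both users of this helper appear below: trim_bitmaps() resets a bitmap when
 * do_trimming() fails or a fatal signal interrupts a partial scan, and
 * btrfs_trim_block_group() resets the last bitmap when the requested range
 * ends mid-bitmap.  Either way a half-scanned bitmap is never left marked
 * trimmed.
 */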

static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *entry)
{
	if (btrfs_free_space_trimming_bitmap(entry)) {
		entry->trim_state = BTRFS_TRIM_STATE_TRIMMED;
		ctl->discardable_extents[BTRFS_STAT_CURR] -=
			entry->bitmap_extents;
		ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes;
	}
}
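
/*
 * reset_trimming_bitmap() and end_trimming_bitmap() are the bookends of the
 * bitmap trim_state machine: ending a trim moves TRIMMING -> TRIMMED and
 * subtracts the bitmap's extents/bytes from the discardable counters, while a
 * reset re-adds them for an entry that was previously accounted as trimmed.
 * The counters thus track only space that is still eligible for async
 * discard.
 */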

/*
 * If @async is set, then we will trim 1 region and return.
 */
static int trim_bitmaps(struct btrfs_block_group *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen,
			u64 maxlen, bool async)
{
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);
	const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);

	while (offset < end) {
		bool next_bitmap = false;
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			block_group->discard_cursor =
				btrfs_block_group_end(block_group);
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		/*
		 * Bitmaps are marked trimmed lossily now to prevent constant
		 * discarding of the same bitmap (the reason why we are bound
		 * by the filters).  So, retrim the block group bitmaps when we
		 * are preparing to punt to the unused_bgs list.  This uses
		 * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED
		 * which is the only discard index which sets minlen to 0.
		 */
		if (!entry || (async && minlen && start == offset &&
			       btrfs_free_space_trimmed(entry))) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		/*
		 * Async discard bitmap trimming begins by setting the start
		 * to key.objectid, and offset_to_bitmap() aligns that to the
		 * start of the bitmap.  This lets us know we are fully
		 * scanning the bitmap rather than only some portion of it.
		 */
		if (start == offset)
			entry->trim_state = BTRFS_TRIM_STATE_TRIMMING;

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
		if (ret2 || start >= end) {
			/*
			 * We lossily consider a bitmap trimmed if we only skip
			 * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER.
			 */
			if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER)
				end_trimming_bitmap(ctl, entry);
			else
				entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		/*
		 * We already trimmed a region, but are using the locking above
		 * to reset the trim_state.
		 */
		if (async && *total_trimmed) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto out;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen || (async && maxlen && bytes > maxlen)) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		/*
		 * Let bytes = max_discard_size + X.
		 * If X < @minlen, we won't trim X when we come back around.
		 * So trim it now.  We differ here from trimming extents as we
		 * don't keep individual state per bit.
		 */
		if (async &&
		    max_discard_size &&
		    bytes > (max_discard_size + minlen))
			bytes = max_discard_size;

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = start;
		trim_entry.bytes = bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes, 0, &trim_entry);
		if (ret) {
			reset_trimming_bitmap(ctl, offset);
			block_group->discard_cursor =
				btrfs_block_group_end(block_group);
			break;
		}
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
			start = offset;
		} else {
			start += bytes;
		}
		block_group->discard_cursor = start;

		if (fatal_signal_pending(current)) {
			if (start != offset)
				reset_trimming_bitmap(ctl, offset);
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	if (offset >= end)
		block_group->discard_cursor = end;

out:
	return ret;
}
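
/*
 * Stride arithmetic for the next_bitmap advance above, assuming 4K pages and
 * a 4K ctl->unit (the sectorsize): BITS_PER_BITMAP = PAGE_SIZE * 8 = 32768,
 * so "offset += BITS_PER_BITMAP * ctl->unit" jumps 32768 * 4K = 128M to the
 * start of the next bitmap window.
 */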

void btrfs_get_block_group_trimming(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->trimming);
}

void btrfs_put_block_group_trimming(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
		   block_group->removed);
	spin_unlock(&block_group->lock);

	if (cleanup) {
		mutex_lock(&fs_info->chunk_mutex);
		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->start, 1);
		BUG_ON(!em); /* logic error, can't happen */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		mutex_unlock(&fs_info->chunk_mutex);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We've left one free space entry and every other task trimming
		 * this block group has left one entry of its own. Free them.
		 */
		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
	}
}

int btrfs_trim_block_group(struct btrfs_block_group *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	u64 rem = 0;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_get_block_group_trimming(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false);
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false);
	div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem);
	/* If we ended in the middle of a bitmap, reset the trimming flag */
	if (rem)
		reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end));
out:
	btrfs_put_block_group_trimming(block_group);
	return ret;
}
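
/*
 * Hypothetical caller sketch (block_group here is a stand-in, not a real call
 * site): a synchronous fstrim-style pass over one block group, discarding
 * free extents of at least 64K.
 *
 *	u64 trimmed = 0;
 *	int ret = btrfs_trim_block_group(block_group, &trimmed,
 *					 block_group->start,
 *					 btrfs_block_group_end(block_group),
 *					 SZ_64K);
 *
 * The div64_u64_rem() check above matters when @end is not bitmap aligned:
 * with 128M bitmaps, end = 192M leaves rem = 64M, so the half-scanned bitmap
 * gets its trim_state reset.
 */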

int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
				   u64 *trimmed, u64 start, u64 end, u64 minlen,
				   bool async)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_get_block_group_trimming(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async);
	btrfs_put_block_group_trimming(block_group);

	return ret;
}
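
/*
 * This wrapper and btrfs_trim_block_group_bitmaps() below let the async
 * discard machinery (see discard.c) trim extents and bitmaps in separate
 * passes; with @async set, trim_no_bitmap() returns after a single trimmed
 * region instead of sweeping the whole range.
 */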

int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
				   u64 *trimmed, u64 start, u64 end, u64 minlen,
				   u64 maxlen, bool async)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_get_block_group_trimming(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen,
			   async);

	btrfs_put_block_group_trimming(block_group);

	return ret;
}
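
/*
 * Illustrative async call, with a made-up 64K..1M size window (the real
 * filters live in discard.h): trim at most one bitmap region whose length is
 * between minlen and maxlen.
 *
 *	u64 trimmed = 0;
 *	ret = btrfs_trim_block_group_bitmaps(block_group, &trimmed,
 *					     block_group->start,
 *					     btrfs_block_group_end(block_group),
 *					     SZ_64K, SZ_1M, true);
 */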

/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count, true);
		/* Logic error; should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
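
/*
 * Worked example for the extent branch above: if the left-most entry is
 * [offset = 256, bytes = 3], inode 256 is handed out and the entry is
 * relinked as [offset = 257, bytes = 2]; once bytes reaches zero the entry is
 * freed instead.  The bitmap branch clears a single bit rather than shrinking
 * the entry.
 */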

struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_inode)
		inode = igrab(root->ino_cache_inode);
	spin_unlock(&root->ino_cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->ino_cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->ino_cache_inode = igrab(inode);
	spin_unlock(&root->ino_cache_lock);

	return inode;
}

int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}

int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path,
			      struct inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	int ret;
	struct btrfs_io_ctl io_ctl;
	bool release_metadata = true;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	memset(&io_ctl, 0, sizeof(io_ctl));
	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, trans);
	if (!ret) {
		/*
		 * At this point writepages() didn't error out, so our metadata
		 * reservation is released when the writeback finishes, at
		 * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
		 * with or without an error.
		 */
		release_metadata = false;
		ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
	}

	if (ret) {
		if (release_metadata)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					inode->i_size, true);
#ifdef DEBUG
		btrfs_err(fs_info,
			  "failed to write free ino cache for root %llu",
			  root->root_key.objectid);
#endif
	}

	return ret;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically, it
 * doesn't do any of the merging that add_free_space does, this acts a lot like
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_TRIMMED;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		info->max_extent_size = 0;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
					  trim_state);

	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	if (map)
		kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
	return 0;
}
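
/*
 * Note on the again: loop above: add_bytes_to_bitmap() only fills the single
 * bitmap covering @offset, so a request that crosses a bitmap boundary (e.g.
 * 1M of space starting 4K before the end of a 128M bitmap window) is consumed
 * over two iterations, allocating a second bitmap buffer on the way around.
 */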

/*
 * Checks to see if the given range is in the free space cache.  This is really
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
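
/*
 * Sketch of how a sanity test might pair these helpers (hypothetical offsets;
 * the real cases live in tests/free-space-tests.c): stage an entry, then
 * assert on its presence.
 *
 *	ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, false);
 *	if (ret)
 *		return ret;
 *	if (!test_check_exists(cache, SZ_4M, SZ_1M))
 *		return -ENOENT;
 */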
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */