// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#include "volumes.h"
#include "space-info.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "discard.h"

#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_64K
#define FORCE_EXTENT_THRESHOLD	SZ_1M

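/*
 * With 4KiB pages, BITS_PER_BITMAP is 32768, so a single bitmap entry
 * covers 32768 * ctl->unit bytes (128MiB for a 4KiB unit).
 * MAX_CACHE_BYTES_PER_GIG caps the memory spent on free space entries
 * per 1GiB of block group space; see recalculate_thresholds().
 */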
struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int count_bitmap_extents(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *bitmap_info);
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_io_ctl *io_ctl,
			     struct btrfs_path *path);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	unsigned nofs_flag;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	/*
	 * We are often under a trans handle at this point, so we need to make
	 * sure NOFS is set to keep us from deadlocking.
	 */
	nofs_flag = memalloc_nofs_save();
	inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path);
	btrfs_release_path(path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(inode))
		return inode;

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
		struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(fs_info->tree_root, path,
					  block_group->start);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(fs_info, "Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memzero_extent_buffer(leaf, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_trans_handle *trans,
			    struct btrfs_block_group *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(trans->fs_info->tree_root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
					 ino, block_group->start);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group *block_group,
				    struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	bool locked = false;

	if (block_group) {
		struct btrfs_path *path = btrfs_alloc_path();

		if (!path) {
			ret = -ENOMEM;
			goto fail;
		}
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(trans, block_group, path);
			btrfs_put_block_group(block_group);
		}

		/*
		 * Now that we've truncated the cache away, it's no longer
		 * set up or written.
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
	}

	btrfs_i_size_write(BTRFS_I(inode), 0);
	truncate_pagecache(inode, 0);

	/*
	 * We skip the throttling logic for free space cache inodes, so we don't
	 * need to check for -EAGAIN.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

static void readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);
}

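/*
 * Layout of the cache file as driven by the io_ctl helpers below: when
 * check_crcs is set, page 0 begins with one u32 crc32c per page followed
 * by a __le64 generation; the remaining space across the pages holds
 * btrfs_free_space_entry records, and each bitmap is then stored as a
 * full page of its own.
 */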
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs and generation into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->fs_info = btrfs_sb(inode->i_sb);
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}
ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		clear_page(io_ctl->cur);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
{
	struct page *page;
	struct inode *inode = io_ctl->inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (page->mapping != inode->i_mapping) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "free space cache page truncated");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					   "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
				*gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->fs_info,
			"csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	copy_page(io_ctl->cur, bitmap);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	copy_page(entry->bitmap, io_ctl->cur);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}

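/*
 * Load the on-disk cache into ctl.  Returns 1 when the cache was read
 * and linked in, 0 when it should simply be ignored and rebuilt (file
 * missing, stale generation, no entries), and a negative value on
 * error, after tearing down whatever was partially loaded.
 */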
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(fs_info,
			   "the free space cache file (%llu) is invalid, skip it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(fs_info,
			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
			  BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, 0);
	if (ret)
		return ret;

	readahead_cache(inode);

	ret = io_ctl_prepare_pages(&io_ctl, true);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		/*
		 * Sync discard ensures that the free space cache is always
		 * trimmed.  So when reading this in, the state should reflect
		 * that.  We also do this for async as a stop gap for lack of
		 * persistence.
		 */
		if (btrfs_test_opt(fs_info, DISCARD_SYNC) ||
		    btrfs_test_opt(fs_info, DISCARD_ASYNC))
			e->trim_state = BTRFS_TRIM_STATE_TRIMMED;

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kmem_cache_zalloc(
					btrfs_free_space_bitmap_cachep, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
		e->bitmap_extents = count_bitmap_extents(ctl, e);
		if (!btrfs_free_space_trimmed(e)) {
			ctl->discardable_extents[BTRFS_STAT_CURR] +=
				e->bitmap_extents;
			ctl->discardable_bytes[BTRFS_STAT_CURR] += e->bytes;
		}
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	btrfs_discard_update_discardable(ctl->private, ctl);
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = block_group->used;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * We must pass a path with search_commit_root set to btrfs_iget in
	 * order to avoid a deadlock when allocating extents for the tree root.
	 *
	 * When we are COWing an extent buffer from the tree root, when looking
	 * for a free extent, at extent-tree.c:find_free_extent(), we can find
	 * a block group without its free space cache loaded. When we find one
	 * we must load its space cache which requires reading its free space
	 * cache's inode item from the root tree. If this inode item is located
	 * in the same leaf that we started COWing before, then we end up in
	 * deadlock on the extent buffer (trying to read lock it when we
	 * previously write locked it).
	 *
	 * It's safe to read the inode item using the commit root because
	 * block groups, once loaded, stay in memory forever (until they are
	 * removed) as well as their space caches once loaded. New block groups
	 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
	 * we will never try to read their inode item while the fs is mounted.
	 */
	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->start);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->length - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->start);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->start);
	}

	iput(inode);
	return ret;
}

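/*
 * Cache writeout happens in stages: extent and bitmap entry records
 * first (including any ranges that are mid-trim), then pinned extents
 * that become free once this transaction commits, and finally the
 * bitmap pages themselves.  The cache only reads back as valid after
 * btrfs_wait_cache_io() stamps a matching generation into the header.
 */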
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, 0, 0, NULL);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1, EXTENT_DELALLOC, 0,
					 0, NULL);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

static noinline_for_stack int write_pinned_extent_entries(
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = &trans->transaction->pinned_extents;

	start = block_group->start;

	while (start < block_group->start + block_group->length) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->start + block_group->length)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->start + block_group->length,
				 extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, 0, 0, NULL);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state);
}

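/*
 * Second half of cache writeout: wait for the page IO started by
 * __btrfs_write_out_cache() and then stamp the free space header via
 * update_cache_item() so the cache reads back as valid.  On failure the
 * page cache is invalidated and the in-memory generation zeroed so a
 * stale cache file is never trusted.
 */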
static int __btrfs_wait_cache_io(struct btrfs_root *root,
				 struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *block_group,
				 struct btrfs_io_ctl *io_ctl,
				 struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group)
			btrfs_debug(root->fs_info,
	  "failed to write free space cache for block group %llu error %d",
				  block_group->start, ret);
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.   Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}

static int btrfs_wait_cache_io_root(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_io_ctl *io_ctl,
				    struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
}

int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
			struct btrfs_block_group *block_group,
			struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
				     block_group, &block_group->io_ctl,
				     path, block_group->start);
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @inode - the inode the free space cache is written into
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @io_ctl - the io_ctl tracking the pages under IO
 * @trans - the trans handle
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, false);
	if (ret)
		goto out_unlock;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(trans, block_group, io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages,
				io_ctl->num_pages, 0, i_size_read(inode),
				&cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);
	io_ctl_free(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state);

	/*
	 * At this point the pages are under IO and we're happy.
	 * The caller is responsible for waiting on them and updating
	 * the cache and the inode.
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state);

out_unlock:
	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	if (must_iput)
		iput(inode);
	return ret;
}

int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
			  struct btrfs_block_group *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
				block_group, &block_group->io_ctl, trans);
	if (ret) {
		btrfs_debug(fs_info,
	  "failed to write free space cache for block group %llu error %d",
			  block_group->start, ret);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}

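/*
 * The helpers below translate byte offsets into bit indices inside a
 * bitmap entry.  Each bitmap covers BITS_PER_BITMAP * ctl->unit bytes,
 * aligned relative to ctl->start; e.g. assuming 4KiB pages and a 4KiB
 * unit, offset_to_bitmap() rounds an offset down to the start of its
 * 128MiB window within the block group.
 */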
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

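/*
 * All free space entries live in one rbtree indexed by starting offset.
 * At most two entries may share an offset, and only when one is an
 * extent and the other a bitmap; the insert and search helpers below
 * keep the extent entry ordered before the bitmap entry in that case.
 */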
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;

	if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
		ctl->discardable_extents[BTRFS_STAT_CURR]--;
		ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes;
	}
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

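/*
 * link_free_space()/unlink_free_space() keep ctl->free_space,
 * ctl->free_extents and the discardable_* counters coherent; untrimmed
 * extent entries feed the async discard accounting (see discard.h).
 */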
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
		ctl->discardable_extents[BTRFS_STAT_CURR]++;
		ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
	}

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->length;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u64, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * We are trying to keep the total amount of memory used per 1GiB of
	 * space to be MAX_CACHE_BYTES_PER_GIG.  However, with a reclamation
	 * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of
	 * bitmaps, we may end up using more memory than this.
	 */
	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);

	bitmap_bytes = ctl->total_bitmaps * ctl->unit;

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}
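/*
 * Worked example of the threshold math above, assuming a 1GiB block
 * group with no bitmaps yet: max_bytes = 64KiB, bitmap_bytes = 0, so
 * extent_bytes is clamped to max_bytes >> 1 = 32KiB and extents_thresh
 * allows 32KiB / sizeof(struct btrfs_free_space) extent entries before
 * we start packing free space into bitmaps.
 */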

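/*
 * The two bitmap helpers below also maintain info->bitmap_extents.  For
 * a clear, extent_delta starts at -1 (one free extent disappears); each
 * still-set neighbouring bit adds one back, so clearing the middle of a
 * free region nets +1 (the region is split in two).  bitmap_set_bits()
 * mirrors this with the signs reversed.
 */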
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count, end;
	int extent_delta = -1;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	end = start + count;
	ASSERT(end <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
	if (info->max_extent_size > ctl->unit)
		info->max_extent_size = 0;

	if (start && test_bit(start - 1, info->bitmap))
		extent_delta++;

	if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
		extent_delta++;

	info->bitmap_extents += extent_delta;
	if (!btrfs_free_space_trimmed(info)) {
		ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
		ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
	}
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count, end;
	int extent_delta = 1;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	end = start + count;
	ASSERT(end <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;

	if (start && test_bit(start - 1, info->bitmap))
		extent_delta--;

	if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
		extent_delta--;

	info->bitmap_extents += extent_delta;
	if (!btrfs_free_space_trimmed(info)) {
		ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
		ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes;
	}
}

/*
 * If we cannot find a suitable extent, we will use bytes to record
 * the size of the max extent.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	/*
	 * Skip searching the bitmap if we don't have a contiguous section that
	 * is large enough for this allocation.
	 */
	if (for_alloc &&
	    bitmap_info->max_extent_size &&
	    bitmap_info->max_extent_size < *bytes) {
		*bytes = bitmap_info->max_extent_size;
		return -1;
	}

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		if (for_alloc && bits == 1) {
			found_bits = 1;
			break;
		}
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	bitmap_info->max_extent_size = *bytes;
	return -1;
}

static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
{
	if (entry->bitmap)
		return entry->max_extent_size;
	return entry->bytes;
}

/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);
			continue;
		}

		/*
		 * Make sure the space returned is big enough to match our
		 * requested alignment: round the offset up to the alignment
		 * (relative to ctl->start) and remember the gap we skipped.
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		if (entry->bytes < *bytes + align_off) {
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size, true);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else {
				*max_extent_size =
					max(get_max_extent_size(entry),
					    *max_extent_size);
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}

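/*
 * Count the number of discrete free extents recorded in a bitmap entry by
 * walking its set-bit regions; rs and re are the first bit of a region and
 * the first bit past it, so each region covers (re - rs) * ctl->unit bytes.
 */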
static int count_bitmap_extents(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *bitmap_info)
{
	struct btrfs_block_group *block_group = ctl->private;
	u64 bytes = bitmap_info->bytes;
	unsigned int rs, re;
	int count = 0;

	if (!block_group || !bytes)
		return count;

	bitmap_for_each_set_region(bitmap_info->bitmap, rs, re, 0,
				   BITS_PER_BITMAP) {
		bytes -= (re - rs) * ctl->unit;
		count++;

		if (!bytes)
			break;
	}

	return count;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	info->bitmap_extents = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	/*
	 * Normally when this is called, the bitmap is completely empty. However,
	 * if we are blowing up the free space cache for one reason or another
	 * via __btrfs_remove_free_space_cache(), then it may not be freed and
	 * we may leave stats on the table.
	 */
	if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) {
		ctl->discardable_extents[BTRFS_STAT_CURR] -=
			bitmap_info->bitmap_extents;
		ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes;
	}
	unlink_free_space(ctl, bitmap_info);
	kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

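/*
 * Clear [*offset, *offset + *bytes) from the bitmap entries covering it.
 * Returns -EAGAIN when the remaining range continues in a non-bitmap entry
 * (the caller retries with extent entries) and -EINVAL when the range is not
 * actually recorded as free.
 */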
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *bitmap_info,
			      u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much of it as we can and clear that amount, and then
	 * go searching for the next bit.
	 */
	search_start = *offset;
	search_bytes = ctl->unit;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
			    false);
	if (ret < 0 || search_start != *offset)
		return -EINVAL;

	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = ctl->unit;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes, false);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}

static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes, enum btrfs_trim_state trim_state)
{
	u64 bytes_to_set = 0;
	u64 end;

	/*
	 * This is a tradeoff to make bitmap trim state minimal.  We mark the
	 * whole bitmap untrimmed if at any point we add untrimmed regions.
	 */
	if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) {
		if (btrfs_free_space_trimmed(info)) {
			ctl->discardable_extents[BTRFS_STAT_CURR] +=
				info->bitmap_extents;
			ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
		}
		info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
	}

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	/*
	 * We set some bytes, we have no idea what the max extent size is
	 * anymore.
	 */
	info->max_extent_size = 0;

	return bytes_to_set;
}

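/*
 * Illustrative numbers for use_bitmap() below (assuming a 4K sectorsize):
 * "small" means at most 8 sectors == 32K.  While extent entries fill less
 * than a third of extents_thresh, even small extents stay out of bitmaps,
 * and anything of FORCE_EXTENT_THRESHOLD (1M) or more is always kept as an
 * extent entry so large regions can be reclaimed from bitmaps.
 */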
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		      struct btrfs_free_space *info)
{
	struct btrfs_block_group *block_group = ctl->private;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	bool forced = false;

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group))
		forced = true;
#endif

	/* This is a way to reclaim large regions from the bitmaps. */
	if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD)
		return false;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (!forced && ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them for larger extents.  However, if we have
		 * plenty of cache left then go ahead and add them; no sense in
		 * adding the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= fs_info->sectorsize * 8) {
			if (ctl->free_extents * 3 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries.  However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory limit,
	 * so allow those block groups to still have a bitmap entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
		return false;

	return true;
}

static const struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

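/*
 * Route the range through bitmap entries: first a bitmap owned by this block
 * group's cluster (if any), then the existing bitmap covering the offset, and
 * finally a freshly allocated bitmap.  tree_lock is dropped around the
 * allocations, which is why the lookup is retried via the "again" label.
 */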
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	enum btrfs_trim_state trim_state;
	int ret;

	bytes = info->bytes;
	offset = info->offset;
	trim_state = info->trim_state;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry, offset,
							  bytes, trim_state);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
					  trim_state);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
						 GFP_NOFS);
		info->trim_state = BTRFS_TRIM_STATE_TRIMMED;
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kmem_cache_free(btrfs_free_space_bitmap_cachep,
					info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

/*
 * Free space merging rules:
 *  1) Merge trimmed areas together
 *  2) Let untrimmed areas coalesce with trimmed areas
 *  3) Always pull neighboring regions from bitmaps
 *
 * The above rules are for when we merge free space based on btrfs_trim_state.
 * Rules 2 and 3 are subtle because they are suboptimal, but are done for the
 * same reason: to promote larger extent regions which makes life easier for
 * find_free_extent().  Rule 2 enables coalescing on the common path, which is
 * returning free space from btrfs_finish_extent_commit(); without it, trimmed
 * regions and newly freed untrimmed regions could never aggregate in the
 * rb_tree.  Rule 3 is purely to obtain larger extents and provide
 * find_free_extent() with the largest extents possible hoping for the reuse
 * path.
 */
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info = NULL;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;
	const bool is_trimmed = btrfs_free_space_trimmed(info);

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else if (!right_info)
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	/* See try_merge_free_space() comment. */
	if (right_info && !right_info->bitmap &&
	    (!is_trimmed || btrfs_free_space_trimmed(right_info))) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	/* See try_merge_free_space() comment. */
	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset &&
	    (!is_trimmed || btrfs_free_space_trimmed(left_info))) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	/* See try_merge_free_space() comment. */
	if (!btrfs_free_space_trimmed(bitmap))
		info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	/* See try_merge_free_space() comment. */
	if (!btrfs_free_space_trimmed(bitmap))
		info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

/*
 * We prefer always to allocate from extent entries, both for clustered and
 * non-clustered allocation requests. So when attempting to add a new extent
 * entry, try to see if there's adjacent free space in bitmap entries, and if
 * there is, migrate that space from the bitmaps to the extent.
 * Like this we get better chances of satisfying space allocation requests
 * because we attempt to satisfy them based on a single cache entry, and never
 * on 2 or more entries - even if the entries represent a contiguous free space
 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
 * ends).
 */
static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info,
			      bool update_stat)
{
	/*
	 * Only work with disconnected entries, as we can change their offset,
	 * and must be extent entries.
	 */
	ASSERT(!info->bitmap);
	ASSERT(RB_EMPTY_NODE(&info->offset_index));

	if (ctl->total_bitmaps > 0) {
		bool stole_end;
		bool stole_front = false;

		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
		if (ctl->total_bitmaps > 0)
			stole_front = steal_from_bitmap_to_front(ctl, info,
								 update_stat);

		if (stole_end || stole_front)
			try_merge_free_space(ctl, info, update_stat);
	}
}

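/*
 * Add [offset, offset + bytes) as free space: merge with adjacent extent
 * entries first, fall back to a bitmap when use_bitmap() says so, steal
 * neighboring bitmap space for extent entries, then link the result and poke
 * the async discard machinery if the space is untrimmed.
 */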
int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
			   struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes,
			   enum btrfs_trim_state trim_state)
{
	struct btrfs_block_group *block_group = ctl->private;
	struct btrfs_free_space *info;
	int ret = 0;
	u64 filter_bytes = bytes;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;
	info->trim_state = trim_state;
	RB_CLEAR_NODE(&info->offset_index);

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * If there was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	/*
	 * Only steal free space from adjacent bitmaps if we're sure we're not
	 * going to add the new free space to existing bitmap entries - because
	 * that would mean unnecessary work that would be reverted. Therefore
	 * attempt to steal space from bitmaps if we're adding an extent entry.
	 */
	steal_from_bitmap(ctl, info, true);

	filter_bytes = max(filter_bytes, info->bytes);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	btrfs_discard_update_discardable(block_group, ctl);
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		btrfs_crit(fs_info, "unable to add free space :%d", ret);
		ASSERT(ret != -EEXIST);
	}

	if (trim_state != BTRFS_TRIM_STATE_TRIMMED) {
		btrfs_discard_check_filter(block_group, filter_bytes);
		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
	}

	return ret;
}

int btrfs_add_free_space(struct btrfs_block_group *block_group,
			 u64 bytenr, u64 size)
{
	enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;

	if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC))
		trim_state = BTRFS_TRIM_STATE_TRIMMED;

	return __btrfs_add_free_space(block_group->fs_info,
				      block_group->free_space_ctl,
				      bytenr, size, trim_state);
}

/*
 * This is a subtle distinction because when adding free space back in general,
 * we want it to be added as untrimmed for async. But in the case where we add
 * it on loading of a block group, we want to consider it trimmed.
 */
int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
				       u64 bytenr, u64 size)
{
	enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;

	if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) ||
	    btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		trim_state = BTRFS_TRIM_STATE_TRIMMED;

	return __btrfs_add_free_space(block_group->fs_info,
				      block_group->free_space_ctl,
				      bytenr, size, trim_state);
}

int btrfs_remove_free_space(struct btrfs_block_group *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	int ret;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

again:
	ret = 0;
	if (!bytes)
		goto out_lock;

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
			goto out_lock;
		}
	}

	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes) {
				ret = link_free_space(ctl, info);
				WARN_ON(ret);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, info);
			}

			offset += to_free;
			bytes -= to_free;
			goto again;
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
				goto again;
			} else if (old_end == offset + bytes) {
				/* all done */
				goto out_lock;
			}
			spin_unlock(&ctl->tree_lock);

			ret = __btrfs_add_free_space(block_group->fs_info, ctl,
						     offset + bytes,
						     old_end - (offset + bytes),
						     info->trim_state);
			WARN_ON(ret);
			goto out;
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
		goto again;
	}
out_lock:
	btrfs_discard_update_discardable(block_group, ctl);
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group *block_group,
			   u64 bytes)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
			   info->offset, info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	spin_unlock(&ctl->tree_lock);
	btrfs_info(fs_info, "block group has cluster?: %s",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	btrfs_info(fs_info,
		   "%d blocks of free space at or bigger than bytes is", count);
}

void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = fs_info->sectorsize;
	ctl->start = block_group->start;
	ctl->private = block_group;
	ctl->op = &free_space_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static void __btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		RB_CLEAR_NODE(&entry->offset_index);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap) {
			/* Merging treats extents as if they were new */
			if (!btrfs_free_space_trimmed(entry)) {
				ctl->discardable_extents[BTRFS_STAT_CURR]--;
				ctl->discardable_bytes[BTRFS_STAT_CURR] -=
					entry->bytes;
			}

			try_merge_free_space(ctl, entry, false);
			steal_from_bitmap(ctl, entry, false);

			/* As we insert directly, update these statistics */
			if (!btrfs_free_space_trimmed(entry)) {
				ctl->discardable_extents[BTRFS_STAT_CURR]++;
				ctl->discardable_bytes[BTRFS_STAT_CURR] +=
					entry->bytes;
			}
		}
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
}

static void __btrfs_remove_free_space_cache_locked(
				struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}

		cond_resched_lock(&ctl->tree_lock);
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	if (ctl->private)
		btrfs_discard_update_discardable(ctl->private, ctl);
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);

		cond_resched_lock(&ctl->tree_lock);
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	btrfs_discard_update_discardable(block_group, ctl);
	spin_unlock(&ctl->tree_lock);
}

/**
 * btrfs_is_free_space_trimmed - see if everything is trimmed
 * @block_group: block_group of interest
 *
 * Walk @block_group's free space rb_tree to determine if everything is trimmed.
 */
bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *node;
	bool ret = true;

	spin_lock(&ctl->tree_lock);
	node = rb_first(&ctl->free_space_offset);

	while (node) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);

		if (!btrfs_free_space_trimmed(info)) {
			ret = false;
			break;
		}

		node = rb_next(node);
	}

	spin_unlock(&ctl->tree_lock);
	return ret;
}

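/*
 * Allocate "bytes" from the block group's free space, honoring the full
 * stripe alignment passed to find_free_space().  If alignment skips the head
 * of an extent entry, that head (align_gap) is handed straight back to
 * __btrfs_add_free_space() below with its original trim state.
 */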
u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;
	u64 align_gap = 0;
	u64 align_gap_len = 0;
	enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
				block_group->full_stripe_len, max_extent_size);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);

		if (!btrfs_free_space_trimmed(entry))
			atomic64_add(bytes, &discard_ctl->discard_bytes_saved);

		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;
		align_gap_trim_state = entry->trim_state;

		if (!btrfs_free_space_trimmed(entry))
			atomic64_add(bytes, &discard_ctl->discard_bytes_saved);

		entry->offset = offset + bytes;
		WARN_ON(entry->bytes < bytes + align_gap_len);

		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}
out:
	btrfs_discard_update_discardable(block_group, ctl);
	spin_unlock(&ctl->tree_lock);

	if (align_gap_len)
		__btrfs_add_free_space(block_group->fs_info, ctl,
				       align_gap, align_gap_len,
				       align_gap_trim_state);

	return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
void btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return;
	}
	btrfs_get_block_group(block_group);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	__btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
}

static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
	if (err) {
		*max_extent_size = max(get_max_extent_size(entry),
				       *max_extent_size);
		return 0;
	}

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes)
			*max_extent_size = max(get_max_extent_size(entry),
					       *max_extent_size);

		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start,
						      max_extent_size);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	if (!btrfs_free_space_trimmed(entry))
		atomic64_add(bytes, &discard_ctl->discard_bytes_saved);

	ctl->free_space -= bytes;
	if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
		ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kmem_cache_free(btrfs_free_space_bitmap_cachep,
					entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		} else if (!btrfs_free_space_trimmed(entry)) {
			ctl->discardable_extents[BTRFS_STAT_CURR]--;
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}

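/*
 * Try to carve a cluster window out of a single bitmap entry: the window
 * must total at least "bytes" worth of set bits (want_bits) and contain at
 * least one contiguous run of cont1_bytes, with every run counted being at
 * least min_bytes.
 */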
static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long max_bits = 0;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;

	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

	/*
	 * Don't bother looking for a cluster in this bitmap if it's heavily
	 * fragmented.
	 */
	if (entry->max_extent_size &&
	    entry->max_extent_size < cont1_bytes)
		return -ENOSPC;
again:
	found_bits = 0;
	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			if (found_bits > max_bits)
				max_bits = found_bits;
			break;
		}
		if (next_zero - i > max_bits)
			max_bits = next_zero - i;
		i = next_zero;
	}

	if (!found_bits) {
		entry->max_extent_size = (u64)max_bits * ctl->unit;
		return -ENOSPC;
	}

	if (!total_found) {
		start = i;
		cluster->max_size = 0;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * ctl->unit + entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}

/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other extents of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}

/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	if (!list_empty(bitmaps))
		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);

	if (!entry || entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}

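/*
 * Example of the thresholds chosen below (illustrative): a 1M data
 * allocation with empty_size == 0 and no SSD_SPREAD requires cont1_bytes ==
 * max(1M, 1M >> 2) == 1M in a single extent, with min_bytes of one sector;
 * metadata relaxes cont1_bytes to exactly "bytes".
 */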
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocates with smaller extents.  For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = fs_info->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = fs_info->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		btrfs_get_block_group(block_group);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	cluster->fragmented = false;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}

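/*
 * Issue the actual discard for [start, start + bytes).  The reserved range
 * is accounted as ->reserved for the duration so the allocator cannot hand
 * it out mid-discard; leftovers on either side of the trimmed chunk are
 * re-added with their original trim state, and the trimmed chunk itself is
 * re-added as trimmed when the discard succeeds.
 */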
static int do_trimming(struct btrfs_block_group *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes,
		       enum btrfs_trim_state reserved_trim_state,
		       struct btrfs_trim_range *trim_entry)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	int update = 0;
	const u64 end = start + bytes;
	const u64 reserved_end = reserved_start + reserved_bytes;
	enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
	if (!ret) {
		*total_trimmed += trimmed;
		trim_state = BTRFS_TRIM_STATE_TRIMMED;
	}

	mutex_lock(&ctl->cache_writeout_mutex);
	if (reserved_start < start)
		__btrfs_add_free_space(fs_info, ctl, reserved_start,
				       start - reserved_start,
				       reserved_trim_state);
	if (start + bytes < reserved_start + reserved_bytes)
		__btrfs_add_free_space(fs_info, ctl, end, reserved_end - end,
				       reserved_trim_state);
	__btrfs_add_free_space(fs_info, ctl, start, bytes, trim_state);
	list_del(&trim_entry->list);
	mutex_unlock(&ctl->cache_writeout_mutex);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);
	}

	return ret;
}

/*
 * If @async is set, then we will trim 1 region and return.
 */
static int trim_no_bitmap(struct btrfs_block_group *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen,
			  bool async)
{
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	enum btrfs_trim_state extent_trim_state;
	u64 bytes;
	const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);

	while (start < end) {
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen)
			goto out_unlock;

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			goto out_unlock;

		/* Skip bitmaps and if async, already trimmed entries */
		while (entry->bitmap ||
		       (async && btrfs_free_space_trimmed(entry))) {
			node = rb_next(&entry->offset_index);
			if (!node)
				goto out_unlock;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end)
			goto out_unlock;

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		extent_trim_state = entry->trim_state;
		if (async) {
			start = entry->offset;
			bytes = entry->bytes;
			if (bytes < minlen) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto next;
			}
			unlink_free_space(ctl, entry);
			/*
			 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
			 * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim
			 * X when we come back around.  So trim it now.
			 */
			if (max_discard_size &&
			    bytes >= (max_discard_size +
				      BTRFS_ASYNC_DISCARD_MIN_FILTER)) {
				bytes = max_discard_size;
				extent_bytes = max_discard_size;
				entry->offset += max_discard_size;
				entry->bytes -= max_discard_size;
				link_free_space(ctl, entry);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, entry);
			}
		} else {
			start = max(start, extent_start);
			bytes = min(extent_start + extent_bytes, end) - start;
			if (bytes < minlen) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto next;
			}

			unlink_free_space(ctl, entry);
			kmem_cache_free(btrfs_free_space_cachep, entry);
		}

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = extent_start;
		trim_entry.bytes = extent_bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes, extent_trim_state,
				  &trim_entry);
		if (ret) {
			block_group->discard_cursor = start + bytes;
			break;
		}
next:
		start += bytes;
		block_group->discard_cursor = start;
		if (async && *total_trimmed)
			break;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;

out_unlock:
	block_group->discard_cursor = btrfs_block_group_end(block_group);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

	return ret;
}

/*
 * If we break out of trimming a bitmap prematurely, we should reset the
 * trimming bit.  In a rather contrived case, it's possible to race here so
 * reset the state to BTRFS_TRIM_STATE_UNTRIMMED.
 *
 * start = start of bitmap
 * end = near end of bitmap
 *
 * Thread 1:			Thread 2:
 * trim_bitmaps(start)
 *				trim_bitmaps(end)
 *				end_trimming_bitmap()
 * reset_trimming_bitmap()
 */
static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset)
{
	struct btrfs_free_space *entry;

	spin_lock(&ctl->tree_lock);
	entry = tree_search_offset(ctl, offset, 1, 0);
	if (entry) {
		if (btrfs_free_space_trimmed(entry)) {
			ctl->discardable_extents[BTRFS_STAT_CURR] +=
				entry->bitmap_extents;
			ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes;
		}
		entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
	}

	spin_unlock(&ctl->tree_lock);
}

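/*
 * Mark a fully scanned bitmap as trimmed and drop it from the discardable
 * stats; the counterpart to reset_trimming_bitmap() above.
 */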
static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *entry)
{
	if (btrfs_free_space_trimming_bitmap(entry)) {
		entry->trim_state = BTRFS_TRIM_STATE_TRIMMED;
		ctl->discardable_extents[BTRFS_STAT_CURR] -=
			entry->bitmap_extents;
		ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes;
	}
}

/*
 * If @async is set, then we will trim 1 region and return.
 */
static int trim_bitmaps(struct btrfs_block_group *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen,
			u64 maxlen, bool async)
{
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);
	const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);

	while (offset < end) {
		bool next_bitmap = false;
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			block_group->discard_cursor =
				btrfs_block_group_end(block_group);
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		/*
		 * Bitmaps are marked trimmed lossily now to prevent constant
		 * discarding of the same bitmap (the reason why we are bound
		 * by the filters).  So, retrim the block group bitmaps when we
		 * are preparing to punt to the unused_bgs list.  This uses
		 * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED
		 * which is the only discard index which sets minlen to 0.
		 */
		if (!entry || (async && minlen && start == offset &&
			       btrfs_free_space_trimmed(entry))) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		/*
		 * Async discard bitmap trimming begins by setting the start
		 * to be key.objectid and the offset_to_bitmap() aligns to the
		 * start of the bitmap.  This lets us know we are fully
		 * scanning the bitmap rather than only some portion of it.
		 */
		if (start == offset)
			entry->trim_state = BTRFS_TRIM_STATE_TRIMMING;

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
		if (ret2 || start >= end) {
			/*
			 * We lossily consider a bitmap trimmed if we only skip
			 * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER.
			 */
			if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER)
				end_trimming_bitmap(ctl, entry);
			else
				entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		/*
		 * We already trimmed a region, but are using the locking above
		 * to reset the trim_state.
		 */
		if (async && *total_trimmed) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto out;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen || (async && maxlen && bytes > maxlen)) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		/*
		 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
		 * If X < @minlen, we won't trim X when we come back around.
		 * So trim it now.  We differ here from trimming extents as we
		 * don't keep individual state per bit.
		 */
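		/*
		 * Illustrative numbers: with max_discard_size = 64M and
		 * minlen = 32K, a 64M + 16K free run is discarded whole,
		 * since capping it at 64M would leave a 16K tail below
		 * @minlen that later passes would skip forever.
		 */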
		if (async &&
		    max_discard_size &&
		    bytes > (max_discard_size + minlen))
			bytes = max_discard_size;

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = start;
		trim_entry.bytes = bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes, 0, &trim_entry);
		if (ret) {
			reset_trimming_bitmap(ctl, offset);
			block_group->discard_cursor =
				btrfs_block_group_end(block_group);
			break;
		}
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
			start = offset;
		} else {
			start += bytes;
		}
		block_group->discard_cursor = start;

		if (fatal_signal_pending(current)) {
			if (start != offset)
				reset_trimming_bitmap(ctl, offset);
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	if (offset >= end)
		block_group->discard_cursor = end;

out:
	return ret;
}
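
/*
 * Synchronous trim entry point (used by the FITRIM ioctl path): trim the
 * extent entries first, then the bitmaps.  If @end lands in the middle of
 * a bitmap, that bitmap's trim state is reset since it was only partially
 * scanned.
 */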
int btrfs_trim_block_group(struct btrfs_block_group *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	u64 rem = 0;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_freeze_block_group(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false);
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false);
	div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem);
	/* If we ended in the middle of a bitmap, reset the trimming flag */
	if (rem)
		reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end));
out:
	btrfs_unfreeze_block_group(block_group);
	return ret;
}

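/*
 * Async discard entry points: driven from the discard workqueue (see
 * discard.c).  With @async set they trim at most one region per call and
 * record progress in block_group->discard_cursor so the worker can resume
 * where it left off.
 */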
int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
				   u64 *trimmed, u64 start, u64 end, u64 minlen,
				   bool async)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_freeze_block_group(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async);
	btrfs_unfreeze_block_group(block_group);

	return ret;
}

int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
				   u64 *trimmed, u64 start, u64 end, u64 minlen,
				   u64 maxlen, bool async)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_freeze_block_group(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen,
			   async);

	btrfs_unfreeze_block_group(block_group);

	return ret;
}

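/*
 * The helpers below implement the free inode number cache (inode-map
 * cache), which reuses the free space cache machinery to track spare
 * inode numbers.  It is only active with the INODE_MAP_CACHE mount
 * option, as checked in the load/write paths further down.
 */
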
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count, true);
		/* Logic error; should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}

struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_inode)
		inode = igrab(root->ino_cache_inode);
	spin_unlock(&root->ino_cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->ino_cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->ino_cache_inode = igrab(inode);
	spin_unlock(&root->ino_cache_lock);

	return inode;
}

int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}

int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on
	 * the normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path,
			      struct inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	int ret;
	struct btrfs_io_ctl io_ctl;
	bool release_metadata = true;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	memset(&io_ctl, 0, sizeof(io_ctl));
	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl, trans);
	if (!ret) {
		/*
		 * At this point writepages() didn't error out, so our metadata
		 * reservation is released when the writeback finishes, at
		 * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
		 * with or without an error.
		 */
		release_metadata = false;
		ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
	}

	if (ret) {
		if (release_metadata)
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
					inode->i_size, true);
		btrfs_debug(fs_info,
			  "failed to write free ino cache for root %llu error %d",
			  root->root_key.objectid, ret);
	}

	return ret;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically.  It
 * doesn't do any of the merging that add_free_space() does; this acts a
 * lot like how the free space cache loading works, so you can create
 * really weird configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_TRIMMED;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		info->max_extent_size = 0;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
					  trim_state);

	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	if (map)
		kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
	return 0;
}
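
/*
 * Sketch of how a sanity test might drive the helpers in this block
 * (illustrative only; the real callers live in the self tests under
 * fs/btrfs/tests/):
 *
 *	ret = test_add_free_space_entry(cache, SZ_512K, SZ_128K, true);
 *	if (ret)
 *		return ret;
 *	if (!test_check_exists(cache, SZ_512K, SZ_128K))
 *		test_err("expected free space was missing");
 */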

/*
 * Checks to see if the given range is in the free space cache.  This is really
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */