/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#include "volumes.h"

#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_32K

struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);

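/*
 * Look up the inode that backs the on-disk free space cache keyed at
 * @offset, going through the free space header item to find its key.
 */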
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));
	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(root->fs_info,
			"Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

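/*
 * Create the inode item and the free space header item that back a
 * free space cache, keyed by @ino and @offset respectively.
 */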
static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

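/*
 * Make sure the block reservation has enough space left to truncate
 * the cache inode and update it afterwards.
 */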
int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode)
{
	int ret = 0;
	struct btrfs_path *path = btrfs_alloc_path();
	bool locked = false;

	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}

	if (block_group) {
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(root, trans, block_group,
					    &block_group->io_ctl, path,
					    block_group->key.objectid);
			btrfs_put_block_group(block_group);
		}

		/*
		 * now that we've truncated the cache away, it's no longer
		 * set up or written
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
	}
	btrfs_free_path(path);

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 * We don't need to check for -EAGAIN because we're a free space
	 * cache inode
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

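/* Start readahead on the whole cache file before we begin parsing it. */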
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}

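/*
 * Set up the I/O control structure used to stream the cache pages.
 * Everything but the inode cache carries inline crcs, and on writeout
 * all of the crcs have to fit in the first page.
 */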
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root, int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32)) >= PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->root = root;
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_SIZE);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					   "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

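/*
 * Stamp the transaction generation just past the crc area of the
 * first page.
 */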
static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->root->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
				*gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

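/*
 * Checksum the page we just filled and store the crc in the array at
 * the front of the first page.
 */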
static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->root->fs_info,
			"csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

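/*
 * Append one extent or bitmap entry to the stream, crc-ing the current
 * page and mapping the next one whenever a page fills up.
 */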
static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			    struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}

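/*
 * Read an on-disk free space cache into @ctl: check the generation,
 * link in each entry as it is parsed and read the bitmap payloads at
 * the end, since they are written out after the entries.
 */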
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(root->fs_info,
			   "the free space cache file (%llu) is invalid, skipping it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(root->fs_info,
			"free space inode generation (%llu) did not match free space cache generation (%llu)",
			BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, root, 0);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;
	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;
	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}
	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group_cache *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;
		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

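/*
 * Write out the extents that are pinned in this block group; they
 * become free space once the transaction commits, so the cache has to
 * include them or the space would be leaked after a remount.
 */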
static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_root *root,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;
	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
1051
	 *
1052 1053 1054 1055 1056
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;
		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;
		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

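/* Write out the bitmap pages that were queued up on @bitmap_list. */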
static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;
	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state,
			   struct list_head *bitmap_list)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state,
			     GFP_NOFS);
}

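/*
 * Wait for a cache writeout started by __btrfs_write_out_cache to
 * finish, then update the free space header item and mark the block
 * group written (or errored) accordingly.
 */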
int btrfs_wait_cache_io(struct btrfs_root *root,
			struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_io_ctl *io_ctl,
			struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	if (block_group)
		root = root->fs_info->tree_root;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group) {
#ifdef DEBUG
			btrfs_err(root->fs_info,
				"failed to write free space cache for block group %llu",
				block_group->key.objectid);
#endif
		}
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.   Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;

}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path, u64 offset)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, root, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
	if (ret)
		goto out;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;
	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;
	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
				0, i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	/*
	 * at this point the pages are under IO and we're happy.  The
	 * caller is responsible for waiting on them and updating the
	 * cache and the inode
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	if (must_iput)
		iput(inode);
	return ret;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}

int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
				      &block_group->io_ctl, trans,
				      path, block_group->key.objectid);
	if (ret) {
#ifdef DEBUG
		btrfs_err(root->fs_info,
			"failed to write free space cache for block group %llu",
			block_group->key.objectid);
#endif
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

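/*
 * Insert a free space entry into the rbtree indexed by offset.  An
 * extent entry and a bitmap entry may share an offset; the extent
 * entry is kept logically first so linear searches find it first.
 */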
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;
		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;
		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

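/*
 * Recompute how many extent entries we allow before falling back to
 * bitmaps, keeping the cache at or below MAX_CACHE_BYTES_PER_GIG of
 * memory per 1GB of block group space.
 */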
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u64, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

/*
 * If we can not find suitable extent, we will use bytes to record
 * the size of the max extent.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	/*
	 * Skip searching the bitmap if we don't have a contiguous section that
	 * is large enough for this allocation.
	 */
	if (for_alloc &&
	    bitmap_info->max_extent_size &&
	    bitmap_info->max_extent_size < *bytes) {
		*bytes = bitmap_info->max_extent_size;
		return -1;
	}

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		if (for_alloc && bits == 1) {
			found_bits = 1;
			break;
		}
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	bitmap_info->max_extent_size = *bytes;
	return -1;
}

/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			tmp = entry->offset - ctl->start + align - 1;
			tmp = div64_u64(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

1817 1818 1819
		if (entry->bytes < *bytes + align_off) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
D
			continue;
D
1823
		if (entry->bitmap) {
1824 1825
			u64 size = *bytes;

1826
			ret = search_bitmap(ctl, entry, &tmp, &size, true);
D
David Woodhouse 已提交
1827 1828
			if (!ret) {
				*offset = tmp;
1829
				*bytes = size;
1830
				return entry;
1831 1832
			} else if (size > *max_extent_size) {
				*max_extent_size = size;
D
			}
		}

D
		*bytes = entry->bytes - align_off;
1839 1840
		return entry;
	}
1841
out:
1842 1843 1844
	return NULL;
}

1845
static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1846 1847
			   struct btrfs_free_space *info, u64 offset)
{
1848
	info->offset = offset_to_bitmap(ctl, offset);
J
Josef Bacik 已提交
1849
	info->bytes = 0;
1850
	INIT_LIST_HEAD(&info->list);
1851 1852
	link_free_space(ctl, info);
	ctl->total_bitmaps++;
1853

1854
	ctl->op->recalc_thresholds(ctl);
1855 1856
}

1857
static void free_bitmap(struct btrfs_free_space_ctl *ctl,
1858 1859
			struct btrfs_free_space *bitmap_info)
{
1860
	unlink_free_space(ctl, bitmap_info);
1861
	kfree(bitmap_info->bitmap);
1862
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1863 1864
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
1865 1866
}

1867
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
1868 1869 1870 1871
			      struct btrfs_free_space *bitmap_info,
			      u64 *offset, u64 *bytes)
{
	u64 end;
1872 1873
	u64 search_start, search_bytes;
	int ret;
1874 1875

again:
1876
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
1877

1878
	/*
1879 1880 1881 1882
	 * We need to search for bits in this bitmap.  We could only cover some
	 * of the extent in this bitmap thanks to how we add space, so we need
	 * to search for as much as it as we can and clear that amount, and then
	 * go searching for the next bit.
1883 1884
	 */
	search_start = *offset;
1885
	search_bytes = ctl->unit;
1886
	search_bytes = min(search_bytes, end - search_start + 1);
1887 1888
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
			    false);
1889 1890
	if (ret < 0 || search_start != *offset)
		return -EINVAL;
1891

1892 1893 1894 1895 1896 1897 1898 1899 1900
	/* We may have found more bits than what we need */
	search_bytes = min(search_bytes, *bytes);

	/* Cannot clear past the end of the bitmap */
	search_bytes = min(search_bytes, end - search_start + 1);

	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
	*offset += search_bytes;
	*bytes -= search_bytes;
1901 1902

	if (*bytes) {
1903
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
1904
		if (!bitmap_info->bytes)
1905
			free_bitmap(ctl, bitmap_info);
1906

1907 1908 1909 1910 1911
		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
1912 1913
			return -EINVAL;

1914 1915 1916 1917 1918 1919 1920
		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
1921 1922 1923
		if (!bitmap_info->bitmap)
			return -EAGAIN;

1924 1925 1926 1927 1928 1929 1930
		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
1931
		search_bytes = ctl->unit;
1932
		ret = search_bitmap(ctl, bitmap_info, &search_start,
1933
				    &search_bytes, false);
1934 1935 1936
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

1937
		goto again;
1938
	} else if (!bitmap_info->bytes)
1939
		free_bitmap(ctl, bitmap_info);
1940 1941 1942 1943

	return 0;
}

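/*
 * Set as much of [offset, offset + bytes) as fits inside the window this
 * bitmap covers and return the number of bytes actually set; the caller
 * loops over successive bitmaps until the whole range is accounted for.
 */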
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	/*
	 * We set some bytes, we have no idea what the max extent size is
	 * anymore.
	 */
	info->max_extent_size = 0;

	return bytes_to_set;
}

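/*
 * Decide whether a new chunk of free space should be tracked as a bitmap
 * rather than an extent entry, trading per-extent precision for a bounded
 * memory footprint once free_extents approaches extents_thresh.
 */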
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		      struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	bool forced = false;

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root,
					     block_group))
		forced = true;
#endif

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (!forced && ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * The original block groups from mkfs can be really small, like 8
	 * megabytes, so don't bother with a bitmap for those entries.  However
	 * some block groups can be smaller than what a bitmap would cover but
	 * are still large enough that they could overflow the 32k memory limit,
	 * so still allow those block groups to have a bitmap entry.
	 */
	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
		return false;

	return true;
}

static const struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

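/*
 * Record the range described by @info in bitmap form, allocating new
 * bitmaps as needed.  Returns 1 once the whole range has been added (at
 * which point @info has been linked in or freed), 0 if this range should
 * not use a bitmap at all, and -ENOMEM on allocation failure.
 */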
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		ASSERT(added == 0);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

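/*
 * Merge @info with the extent entries immediately to its left and right,
 * when they exist and aren't bitmaps, so physically contiguous free space
 * is tracked by a single entry.  Returns true if a neighbour was absorbed.
 */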
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

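/*
 * Helpers for steal_from_bitmap(): pull contiguous free bits sitting
 * immediately after (or, for the _to_front variant, immediately before)
 * @info out of the adjacent bitmap and extend @info over them.  Both
 * return true if any bits were stolen.
 */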
static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
				     struct btrfs_free_space *info,
				     bool update_stat)
{
	struct btrfs_free_space *bitmap;
	unsigned long i;
	unsigned long j;
	const u64 end = info->offset + info->bytes;
	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
	u64 bytes;

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, end);
	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
	if (j == i)
		return false;
	bytes = (j - i) * ctl->unit;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, end, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, end, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       bool update_stat)
{
	struct btrfs_free_space *bitmap;
	u64 bitmap_offset;
	unsigned long i;
	unsigned long j;
	unsigned long prev_j;
	u64 bytes;

	bitmap_offset = offset_to_bitmap(ctl, info->offset);
	/* If we're on a boundary, try the previous logical bitmap. */
	if (bitmap_offset == info->offset) {
		if (info->offset == 0)
			return false;
		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
	}

	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
	if (!bitmap)
		return false;

	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
	j = 0;
	prev_j = (unsigned long)-1;
	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
		if (j > i)
			break;
		prev_j = j;
	}
	if (prev_j == i)
		return false;

	if (prev_j == (unsigned long)-1)
		bytes = (i + 1) * ctl->unit;
	else
		bytes = (i - prev_j) * ctl->unit;

	info->offset -= bytes;
	info->bytes += bytes;

	if (update_stat)
		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
	else
		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);

	if (!bitmap->bytes)
		free_bitmap(ctl, bitmap);

	return true;
}

/*
 * We prefer always to allocate from extent entries, both for clustered and
 * non-clustered allocation requests. So when attempting to add a new extent
 * entry, try to see if there's adjacent free space in bitmap entries, and if
 * there is, migrate that space from the bitmaps to the extent.
 * Like this we get better chances of satisfying space allocation requests
 * because we attempt to satisfy them based on a single cache entry, and never
 * on 2 or more entries - even if the entries represent a contiguous free space
 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
 * ends).
 */
static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info,
			      bool update_stat)
{
	/*
	 * Only work with disconnected entries, as we can change their offset,
	 * and must be extent entries.
	 */
	ASSERT(!info->bitmap);
	ASSERT(RB_EMPTY_NODE(&info->offset_index));

	if (ctl->total_bitmaps > 0) {
		bool stole_end;
		bool stole_front = false;

		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
		if (ctl->total_bitmaps > 0)
			stole_front = steal_from_bitmap_to_front(ctl, info,
								 update_stat);

		if (stole_end || stole_front)
			try_merge_free_space(ctl, info, update_stat);
	}
}

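/*
 * Add a free space range to the cache: merge it with its neighbours, drop
 * it into a bitmap when the extent count is past the threshold, otherwise
 * link it as a new extent entry after stealing adjacent bitmap bits so the
 * entry covers as much contiguous space as possible.
 */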
int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
			   struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;
	RB_CLEAR_NODE(&info->offset_index);

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent, so we know we're going to have to allocate a new extent;
	 * before we do that, see if we need to drop this into a bitmap.
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	/*
	 * Only steal free space from adjacent bitmaps if we're sure we're not
	 * going to add the new free space to existing bitmap entries - because
	 * that would mean unnecessary work that would be reverted. Therefore
	 * attempt to steal space from bitmaps if we're adding an extent entry.
	 */
	steal_from_bitmap(ctl, info, true);

	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		btrfs_crit(fs_info, "unable to add free space: %d", ret);
		ASSERT(ret != -EEXIST);
	}

	return ret;
}

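/*
 * Remove [offset, offset + bytes) from the cache, splitting extent entries
 * and clearing bitmap bits as needed; the range may span a mix of extent
 * and bitmap entries.
 */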
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	int ret;
	bool re_search = false;

	spin_lock(&ctl->tree_lock);

again:
	ret = 0;
	if (!bytes)
		goto out_lock;

	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * We didn't find an extent entry that matched the space we
		 * wanted to remove; look for a bitmap instead.
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/*
			 * If we found a partial bit of our free space in a
			 * bitmap but then couldn't find the other part this may
			 * be a problem, so WARN about it.
			 */
			WARN_ON(re_search);
			goto out_lock;
		}
	}

	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
			u64 to_free = min(bytes, info->bytes);

			info->bytes -= to_free;
			info->offset += to_free;
			if (info->bytes) {
				ret = link_free_space(ctl, info);
				WARN_ON(ret);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, info);
			}

			offset += to_free;
			bytes -= to_free;
			goto again;
		} else {
			u64 old_end = info->bytes + info->offset;

			info->bytes = offset - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;

			/* Not enough bytes in this entry to satisfy us */
			if (old_end < offset + bytes) {
				bytes -= old_end - offset;
				offset = old_end;
				goto again;
			} else if (old_end == offset + bytes) {
				/* all done */
				goto out_lock;
			}
			spin_unlock(&ctl->tree_lock);

			ret = btrfs_add_free_space(block_group, offset + bytes,
						   old_end - (offset + bytes));
			WARN_ON(ret);
			goto out;
		}
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN) {
		re_search = true;
		goto again;
	}
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes && !block_group->ro)
			count++;
		btrfs_crit(block_group->fs_info,
			   "entry offset %llu, bytes %llu, bitmap %s",
			   info->offset, info->bytes,
			   (info->bitmap) ? "yes" : "no");
	}
	btrfs_info(block_group->fs_info, "block group has cluster?: %s",
		   list_empty(&block_group->cluster_list) ? "no" : "yes");
	btrfs_info(block_group->fs_info,
		   "%d blocks of free space at or bigger than %llu bytes",
		   count, bytes);
}

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
}

/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		RB_CLEAR_NODE(&entry->offset_index);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap) {
			try_merge_free_space(ctl, entry, false);
			steal_from_bitmap(ctl, entry, false);
		}
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}

static void __btrfs_remove_free_space_cache_locked(
				struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}

		cond_resched_lock(&ctl->tree_lock);
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);

		cond_resched_lock(&ctl->tree_lock);
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

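/*
 * Allocate @bytes (plus @empty_size of slack) from this block group's free
 * space cache and return the chosen logical offset, or 0 if nothing large
 * enough was found.  Any gap carved off the front of the chosen entry to
 * satisfy full-stripe alignment is handed back to the cache.
 *
 * A hypothetical caller (names illustrative, error handling elided) might
 * look like:
 *
 *	u64 max_extent_size = 0;
 *	u64 start = btrfs_find_space_for_alloc(block_group, search_start,
 *					       num_bytes, empty_size,
 *					       &max_extent_size);
 *	if (start == 0)
 *		return -ENOSPC;
 *
 * On failure, max_extent_size holds the largest free extent seen, which
 * callers can use to size a smaller retry.
 */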
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;
	u64 align_gap = 0;
	u64 align_gap_len = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search,
				block_group->full_stripe_len, max_extent_size);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		align_gap_len = offset - entry->offset;
		align_gap = entry->offset;

		entry->offset = offset + bytes;
		WARN_ON(entry->bytes < bytes + align_gap_len);

		entry->bytes -= bytes + align_gap_len;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	if (align_gap_len)
		__btrfs_add_free_space(block_group->fs_info, ctl,
				       align_gap, align_gap_len);
	return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}

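/*
 * Carve @bytes out of a bitmap entry that already belongs to the cluster,
 * searching from @min_start.  Returns the start of the allocation, or 0 on
 * failure, in which case the largest extent found is recorded in
 * *max_extent_size.
 */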
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = min_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
	if (err) {
		if (search_bytes > *max_extent_size)
			*max_extent_size = search_bytes;
		return 0;
	}

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
			*max_extent_size = entry->bytes;

		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start,
						      max_extent_size);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}

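/*
 * Try to build the cluster window out of a single bitmap entry: gather
 * runs of set bits until at least @bytes worth are found, with one run of
 * at least @cont1_bytes, then move the entry from the free space tree into
 * the cluster's rbtree.  Returns -ENOSPC when the bitmap is too fragmented
 * to provide that.
 */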
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long max_bits = 0;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;

	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

	/*
	 * Don't bother looking for a cluster in this bitmap if it's heavily
	 * fragmented.
	 */
	if (entry->max_extent_size &&
	    entry->max_extent_size < cont1_bytes)
		return -ENOSPC;
again:
	found_bits = 0;
	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			if (found_bits > max_bits)
				max_bits = found_bits;
			break;
		}
		if (next_zero - i > max_bits)
			max_bits = next_zero - i;
		i = next_zero;
	}

	if (!found_bits) {
		entry->max_extent_size = (u64)max_bits * ctl->unit;
		return -ENOSPC;
	}

	if (!total_found) {
		start = i;
		cluster->max_size = 0;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * ctl->unit + entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}

/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}

/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	if (!list_empty(bitmaps))
		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);

	if (!entry || entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}

/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC
 */
int btrfs_find_space_cluster(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocations with smaller extents.  For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(root->fs_info, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = block_group->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = block_group->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	cluster->fragmented = false;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}

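/*
 * Discard one contiguous range and then return the reserved space to the
 * free space cache.  The range stays on ctl->trimming_ranges while the
 * discard is in flight so that a concurrent free space cache writeout can
 * take the still-in-progress trim into account.
 */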
static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes,
		       struct btrfs_trim_range *trim_entry)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_discard_extent(fs_info->extent_root,
				   start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	mutex_lock(&ctl->cache_writeout_mutex);
	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
	list_del(&trim_entry->list);
	mutex_unlock(&ctl->cache_writeout_mutex);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&space_info->lock);
		spin_unlock(&block_group->lock);
	}

	return ret;
}

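/*
 * Trim every extent entry overlapping [start, end) that is at least
 * @minlen bytes long, temporarily unlinking each entry while its discard
 * is submitted; trim_bitmaps() below does the same for bitmap entries.
 */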
static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = extent_start;
		trim_entry.bytes = extent_bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes, &trim_entry);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}

static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = start;
		trim_entry.bytes = bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes, &trim_entry);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}

void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->trimming);
}

void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
		   block_group->removed);
	spin_unlock(&block_group->lock);

	if (cleanup) {
		lock_chunks(block_group->fs_info->chunk_root);
		em_tree = &block_group->fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		/*
		 * remove_extent_mapping() will delete us from the pinned_chunks
		 * list, which is protected by the chunk mutex.
		 */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		unlock_chunks(block_group->fs_info->chunk_root);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We've left one free space entry and other tasks trimming
		 * this block group have left 1 entry each one. Free them.
		 */
		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
	}
}

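/*
 * Discard all free space in [start, end) of at least @minlen bytes, first
 * from extent entries and then from bitmaps, holding a trimming reference
 * so the block group isn't torn down underneath us.
 */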
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (block_group->removed) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_get_block_group_trimming(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
out:
	btrfs_put_block_group_trimming(block_group);
	return ret;
}

/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count, true);
		/* Logic error; Should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}

struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_inode)
		inode = igrab(root->ino_cache_inode);
	spin_unlock(&root->ino_cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->ino_cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->ino_cache_inode = igrab(inode);
	spin_unlock(&root->ino_cache_lock);

	return inode;
}

int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}

int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path,
			      struct inode *inode)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	int ret;
	struct btrfs_io_ctl io_ctl;
	bool release_metadata = true;

	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return 0;

	memset(&io_ctl, 0, sizeof(io_ctl));
	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
				      trans, path, 0);
	if (!ret) {
		/*
		 * At this point writepages() didn't error out, so our metadata
		 * reservation is released when the writeback finishes, at
		 * inode.c:btrfs_finish_ordered_io(), regardless of it finishing
		 * with or without an error.
		 */
		release_metadata = false;
		ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
	}

	if (ret) {
		if (release_metadata)
			btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
		btrfs_err(root->fs_info,
			"failed to write free ino cache for root %llu",
			root->root_key.objectid);
#endif
	}

	return ret;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically, it
 * doesn't do any of the merging that add_free_space does, this acts a lot like
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		info->max_extent_size = 0;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);

	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	kfree(map);
	return 0;
}

/*
 * Checks to see if the given range is in the free space cache.  This is really
 * just used to check the absence of space, so if there is free space in the
 * range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group_cache *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */