/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>

#include "ctree.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "transaction.h"
29 30 31 32 33 34 35 36 37 38 39 40
static int caching_kthread(void *data)
{
	struct btrfs_root *root = data;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u64 last = (u64)-1;
	int slot;
	int ret;

C
Chris Mason 已提交
41 42 43
	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

44 45 46 47 48 49 50 51 52 53 54 55 56 57
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Since the commit root is read-only, we can safely skip locking. */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_INODE_ITEM_KEY;
again:
	/* need to make sure the commit_root doesn't disappear */
58
	down_read(&fs_info->commit_root_sem);
59 60 61 62 63 64

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
65
		if (btrfs_fs_closing(fs_info))
66 67 68 69
			goto out;

		leaf = path->nodes[0];
		slot = path->slots[0];
70
		if (slot >= btrfs_header_nritems(leaf)) {
71 72 73 74 75 76 77 78 79 80
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				leaf = path->nodes[0];

81
				if (WARN_ON(btrfs_header_nritems(leaf) == 0))
82 83 84 85 86 87 88
					break;

				/*
				 * Save the key so we can advances forward
				 * in the next search.
				 */
				btrfs_item_key_to_cpu(leaf, &key, 0);
89
				btrfs_release_path(path);
90
				root->ino_cache_progress = last;
91
				up_read(&fs_info->commit_root_sem);
92 93 94 95 96 97 98 99 100 101 102
				schedule_timeout(1);
				goto again;
			} else
				continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_INODE_ITEM_KEY)
			goto next;

103
		if (key.objectid >= root->highest_objectid)
104 105 106 107 108
			break;

		if (last != (u64)-1 && last + 1 != key.objectid) {
			__btrfs_add_free_space(ctl, last + 1,
					       key.objectid - last - 1);
109
			wake_up(&root->ino_cache_wait);
110 111 112 113 114 115 116
		}

		last = key.objectid;
next:
		path->slots[0]++;
	}

117
	if (last < root->highest_objectid - 1) {
118
		__btrfs_add_free_space(ctl, last + 1,
119
				       root->highest_objectid - last - 1);
120 121
	}

122 123 124
	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_FINISHED;
	spin_unlock(&root->ino_cache_lock);
125

126
	root->ino_cache_progress = (u64)-1;
127 128
	btrfs_unpin_free_ino(root);
out:
129
	wake_up(&root->ino_cache_wait);
130
	up_read(&fs_info->commit_root_sem);
131 132 133 134 135 136 137 138

	btrfs_free_path(path);

	return ret;
}

static void start_caching(struct btrfs_root *root)
{
139
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
140
	struct task_struct *tsk;
141
	int ret;
142
	u64 objectid;
143

C
Chris Mason 已提交
144 145 146
	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return;

147 148 149
	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_NO) {
		spin_unlock(&root->ino_cache_lock);
150 151 152
		return;
	}

153 154
	root->ino_cache_state = BTRFS_CACHE_STARTED;
	spin_unlock(&root->ino_cache_lock);
155

156 157
	ret = load_free_ino_cache(root->fs_info, root);
	if (ret == 1) {
158 159 160
		spin_lock(&root->ino_cache_lock);
		root->ino_cache_state = BTRFS_CACHE_FINISHED;
		spin_unlock(&root->ino_cache_lock);
161 162 163
		return;
	}

164 165 166 167 168 169 170 171 172 173 174 175 176
	/*
	 * It can be quite time-consuming to fill the cache by searching
	 * through the extent tree, and this can keep ino allocation path
	 * waiting. Therefore at start we quickly find out the highest
	 * inode number and we know we can use inode numbers which fall in
	 * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
	 */
	ret = btrfs_find_free_objectid(root, &objectid);
	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
		__btrfs_add_free_space(ctl, objectid,
				       BTRFS_LAST_FREE_OBJECTID - objectid + 1);
	}

177
	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
178
			  root->root_key.objectid);
179 180
	if (IS_ERR(tsk)) {
		btrfs_warn(root->fs_info, "failed to start inode caching task");
181
		btrfs_clear_pending_and_info(root->fs_info, INODE_MAP_CACHE,
182 183
				"disabling inode map caching");
	}
184 185 186 187
}

/*
 * Hand out a free inode number for @root via the in-memory cache,
 * starting the caching thread on demand and sleeping until either some
 * free space appears or caching finishes.  Falls back to the plain
 * highest-objectid allocator when inode map caching is disabled.
 * Returns 0 with *objectid set, or -ENOSPC when the cache is complete
 * and empty.
 */
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return btrfs_find_free_objectid(root, objectid);

again:
	*objectid = btrfs_find_ino_for_alloc(root);

	if (*objectid != 0)
		return 0;

	start_caching(root);

	wait_event(root->ino_cache_wait,
		   root->ino_cache_state == BTRFS_CACHE_FINISHED ||
		   root->free_ino_ctl->free_space > 0);

	if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
	    root->free_ino_ctl->free_space == 0)
		return -ENOSPC;
	else
		goto again;
}

/*
 * Give an allocated-but-unused inode number back to the cache.  The
 * number goes into the pinned tree first; btrfs_unpin_free_ino() moves
 * it to the free tree at commit time.  While caching is still running
 * we must hold commit_root_sem for writing so the pinned insertion
 * cannot race with the caching thread's view of the commit root.
 */
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return;
again:
	if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
		__btrfs_add_free_space(pinned, objectid, 1);
	} else {
		down_write(&root->fs_info->commit_root_sem);
		spin_lock(&root->ino_cache_lock);
		/* Caching may have finished while we took the semaphore. */
		if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
			spin_unlock(&root->ino_cache_lock);
			up_write(&root->fs_info->commit_root_sem);
			goto again;
		}
		spin_unlock(&root->ino_cache_lock);

		start_caching(root);

		__btrfs_add_free_space(pinned, objectid, 1);

		up_write(&root->fs_info->commit_root_sem);
	}
}

/*
238 239 240 241
 * When a transaction is committed, we'll move those inode numbers which are
 * smaller than root->ino_cache_progress from pinned tree to free_ino tree, and
 * others will just be dropped, because the commit root we were searching has
 * changed.
242
 *
243
 * Must be called with root->fs_info->commit_root_sem held
244 245 246 247 248 249 250 251 252
 */
void btrfs_unpin_free_ino(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 count;

C
Chris Mason 已提交
253 254 255
	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return;

256 257 258 259 260 261
	while (1) {
		n = rb_first(rbroot);
		if (!n)
			break;

		info = rb_entry(n, struct btrfs_free_space, offset_index);
262
		BUG_ON(info->bitmap); /* Logic error */
263

264
		if (info->offset > root->ino_cache_progress)
265
			goto free;
266 267
		else if (info->offset + info->bytes > root->ino_cache_progress)
			count = root->ino_cache_progress - info->offset + 1;
268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366
		else
			count = info->bytes;

		__btrfs_add_free_space(ctl, info->offset, count);
free:
		rb_erase(&info->offset_index, rbroot);
		kfree(info);
	}
}

#define INIT_THRESHOLD	(((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)

/*
 * The goal is to keep the memory used by the free_ino tree won't
 * exceed the memory if we use bitmaps only.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int max_ino;
	int max_bitmaps;

	/* Empty tree: fall back to the initial 16K threshold. */
	n = rb_last(&ctl->free_space_offset);
	if (!n) {
		ctl->extents_thresh = INIT_THRESHOLD;
		return;
	}
	info = rb_entry(n, struct btrfs_free_space, offset_index);

	/*
	 * Find the maximum inode number in the filesystem. Note we
	 * ignore the fact that this can be a bitmap, because we are
	 * not doing precise calculation.
	 *
	 * NOTE(review): this reads info->bytes (the size of the rightmost
	 * entry) rather than info->offset + info->bytes - 1; since the
	 * last rb entry has the highest offset, the latter would be the
	 * true maximum — confirm whether this imprecision is intended.
	 */
	max_ino = info->bytes - 1;

	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
	if (max_bitmaps <= ctl->total_bitmaps) {
		ctl->extents_thresh = 0;
		return;
	}

	/* Spend whatever bitmap memory budget remains on extent entries. */
	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
				PAGE_CACHE_SIZE / sizeof(*info);
}

/*
 * Decide whether a chunk of free inode numbers should be tracked with a
 * bitmap.  We keep using extent entries while we are under the extents
 * threshold, and for large chunks, since a big contiguous run is cheaper
 * to represent as a single extent.
 */
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	if (ctl->free_extents >= ctl->extents_thresh &&
	    info->bytes <= INODES_PER_BITMAP / 10)
		return true;

	return false;
}

/* Free-space-cache callbacks for the free_ino tree. */
static struct btrfs_free_space_op free_ino_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

/* The pinned tree keeps a fixed threshold — intentionally a no-op. */
static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
}

/* Bitmap decision for the pinned tree: never use bitmaps. */
static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	/*
	 * We always use extents for two reasons:
	 *
	 * - The pinned tree is only used during the process of caching
	 *   work.
	 * - Make code simpler. See btrfs_unpin_free_ino().
	 */
	return false;
}

/* Free-space-cache callbacks for the pinned (free_ino_pinned) tree. */
static struct btrfs_free_space_op pinned_free_ino_op = {
	.recalc_thresholds	= pinned_recalc_thresholds,
	.use_bitmap		= pinned_use_bitmap,
};

void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = 1;
	ctl->start = 0;
	ctl->private = NULL;
	ctl->op = &free_ino_op;
367 368
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);
369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384

	/*
	 * Initially we allow to use 16K of ram to cache chunks of
	 * inode numbers before we resort to bitmaps. This is somewhat
	 * arbitrary, but it will be adjusted in runtime.
	 */
	ctl->extents_thresh = INIT_THRESHOLD;

	spin_lock_init(&pinned->tree_lock);
	pinned->unit = 1;
	pinned->start = 0;
	pinned->private = NULL;
	pinned->extents_thresh = 0;
	pinned->op = &pinned_free_ino_op;
}

385 386 387 388 389 390
int btrfs_save_ino_cache(struct btrfs_root *root,
			 struct btrfs_trans_handle *trans)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
391 392
	struct btrfs_block_rsv *rsv;
	u64 num_bytes;
393 394 395 396 397
	u64 alloc_hint = 0;
	int ret;
	int prealloc;
	bool retry = false;

398 399 400 401 402 403
	/* only fs tree and subvol/snap needs ino cache */
	if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
	    (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
		return 0;

404
	/* Don't save inode cache if we are deleting this root */
405
	if (btrfs_root_refs(&root->root_item) == 0)
406 407
		return 0;

C
Chris Mason 已提交
408 409 410
	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

411 412 413
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
C
Chris Mason 已提交
414

415 416 417 418 419 420
	rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->trans_block_rsv;

	num_bytes = trans->bytes_reserved;
	/*
	 * 1 item for inode item insertion if need
421 422
	 * 4 items for inode item update (in the worst case)
	 * 1 items for slack space if we need do truncation
423 424 425
	 * 1 item for free space object
	 * 3 items for pre-allocation
	 */
426
	trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10);
M
Miao Xie 已提交
427 428 429
	ret = btrfs_block_rsv_add(root, trans->block_rsv,
				  trans->bytes_reserved,
				  BTRFS_RESERVE_NO_FLUSH);
430 431
	if (ret)
		goto out;
432
	trace_btrfs_space_reservation(root->fs_info, "ino_cache",
433
				      trans->transid, trans->bytes_reserved, 1);
434 435
again:
	inode = lookup_free_ino_inode(root, path);
436
	if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
437
		ret = PTR_ERR(inode);
438
		goto out_release;
439 440 441
	}

	if (IS_ERR(inode)) {
442
		BUG_ON(retry); /* Logic error */
443 444 445 446
		retry = true;

		ret = create_free_ino_inode(root, trans, path);
		if (ret)
447
			goto out_release;
448 449 450 451 452
		goto again;
	}

	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
453 454 455 456
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_put;
	}
457 458

	if (i_size_read(inode) > 0) {
459
		ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
460
		if (ret) {
461 462
			if (ret != -ENOSPC)
				btrfs_abort_transaction(trans, root, ret);
463
			goto out_put;
464
		}
465 466
	}

467 468
	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
469
		ret = -1;
470
		spin_unlock(&root->ino_cache_lock);
471 472
		goto out_put;
	}
473
	spin_unlock(&root->ino_cache_lock);
474 475 476 477 478 479 480 481 482 483

	spin_lock(&ctl->tree_lock);
	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
	prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
	prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
	spin_unlock(&ctl->tree_lock);

	/* Just to make sure we have enough space */
	prealloc += 8 * PAGE_CACHE_SIZE;

484
	ret = btrfs_delalloc_reserve_space(inode, prealloc);
485 486 487 488 489
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
					      prealloc, prealloc, &alloc_hint);
490 491
	if (ret) {
		btrfs_delalloc_release_space(inode, prealloc);
492
		goto out_put;
493
	}
494 495
	btrfs_free_reserved_data_space(inode, prealloc);

496
	ret = btrfs_write_out_ino_cache(root, trans, path, inode);
497 498
out_put:
	iput(inode);
499
out_release:
500
	trace_btrfs_space_reservation(root->fs_info, "ino_cache",
501
				      trans->transid, trans->bytes_reserved, 0);
502
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
503
out:
504 505
	trans->block_rsv = rsv;
	trans->bytes_reserved = num_bytes;
506 507 508 509 510

	btrfs_free_path(path);
	return ret;
}

511
static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
C
Chris Mason 已提交
512 513 514
{
	struct btrfs_path *path;
	int ret;
515
	struct extent_buffer *l;
C
Chris Mason 已提交
516
	struct btrfs_key search_key;
517
	struct btrfs_key found_key;
C
Chris Mason 已提交
518 519 520
	int slot;

	path = btrfs_alloc_path();
T
Tsutomu Itoh 已提交
521 522
	if (!path)
		return -ENOMEM;
C
Chris Mason 已提交
523

524 525
	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
C
Chris Mason 已提交
526 527 528 529
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
530
	BUG_ON(ret == 0); /* Corruption */
C
Chris Mason 已提交
531 532
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
533 534
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
535 536
		*objectid = max_t(u64, found_key.objectid,
				  BTRFS_FIRST_FREE_OBJECTID - 1);
C
Chris Mason 已提交
537
	} else {
538
		*objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
C
Chris Mason 已提交
539 540 541 542 543 544 545
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

546
int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
547 548
{
	int ret;
549
	mutex_lock(&root->objectid_mutex);
550

551
	if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
552 553
		ret = btrfs_find_highest_objectid(root,
						  &root->highest_objectid);
554 555 556
		if (ret)
			goto out;
	}
557

558 559 560
	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		ret = -ENOSPC;
		goto out;
561
	}
562 563 564 565

	*objectid = ++root->highest_objectid;
	ret = 0;
out:
566
	mutex_unlock(&root->objectid_mutex);
567 568
	return ret;
}