ctree.c 149.4 KB
Newer Older
C
Chris Mason 已提交
1
/*
C
Chris Mason 已提交
2
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
C
Chris Mason 已提交
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

19
#include <linux/sched.h>
20
#include <linux/slab.h>
21
#include <linux/rbtree.h>
22 23
#include "ctree.h"
#include "disk-io.h"
24
#include "transaction.h"
25
#include "print-tree.h"
26
#include "locking.h"
27

/*
 * Forward declarations for helpers defined later in this file, plus the
 * time-sequenced ("old") tree block readers used by the tree mod log.
 */
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
					  u32 blocksize, u64 parent_transid,
					  u64 time_seq);
struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
						u64 bytenr, u32 blocksize,
						u64 time_seq);

C
Chris Mason 已提交
52
struct btrfs_path *btrfs_alloc_path(void)
C
Chris Mason 已提交
53
{
C
Chris Mason 已提交
54
	struct btrfs_path *path;
J
Jeff Mahoney 已提交
55
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
C
Chris Mason 已提交
56
	return path;
C
Chris Mason 已提交
57 58
}

59 60 61 62 63 64 65 66
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
67 68 69 70 71 72 73
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
74 75 76 77 78
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	/* walk top-down: the reverse of the order the locks were taken in */
	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}

C
Chris Mason 已提交
123
/* this also releases the path */
C
Chris Mason 已提交
124
void btrfs_free_path(struct btrfs_path *p)
125
{
126 127
	if (!p)
		return;
128
	btrfs_release_path(p);
C
Chris Mason 已提交
129
	kmem_cache_free(btrfs_path_cachep, p);
130 131
}

C
Chris Mason 已提交
132 133 134 135 136 137
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that no locks or extent buffers held.
 */
138
noinline void btrfs_release_path(struct btrfs_path *p)
139 140
{
	int i;
141

C
Chris Mason 已提交
142
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
143
		p->slots[i] = 0;
144
		if (!p->nodes[i])
145 146
			continue;
		if (p->locks[i]) {
147
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
148 149
			p->locks[i] = 0;
		}
150
		free_extent_buffer(p->nodes[i]);
151
		p->nodes[i] = NULL;
152 153 154
	}
}

C
Chris Mason 已提交
155 156 157 158 159 160 161 162 163 164
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
165 166 167
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;
168

169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185
	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
186 187 188
	return eb;
}

C
Chris Mason 已提交
189 190 191 192
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
193 194 195 196
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

C
Chris Mason 已提交
197
	while (1) {
198 199
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
200
		if (eb == root->node)
201 202 203 204 205 206 207
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

C
Chris Mason 已提交
227 228 229 230
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
231 232
static void add_root_to_dirty_list(struct btrfs_root *root)
{
233
	spin_lock(&root->fs_info->trans_lock);
234 235 236 237
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
238
	spin_unlock(&root->fs_info->trans_lock);
239 240
}

C
Chris Mason 已提交
241 242 243 244 245
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
246 247 248 249 250 251 252 253
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
254
	struct btrfs_disk_key disk_key;
255 256 257 258 259 260

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
261 262 263 264
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);
Z
Zheng Yan 已提交
265

266 267
	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
268
				     buf->start, 0);
269
	if (IS_ERR(cow))
270 271 272 273 274
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
275 276 277 278 279 280 281
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);
282

Y
Yan Zheng 已提交
283 284 285 286
	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

287
	WARN_ON(btrfs_header_generation(buf) > trans->transid);
288
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
A
Arne Jansen 已提交
289
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
290
	else
A
Arne Jansen 已提交
291
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);
292

293 294 295 296 297 298 299 300
	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * Operation types recorded in the tree modification log.  The
 * REMOVE_WHILE_* variants tag removals that happen as a side effect of
 * moving keys or freeing a whole buffer, so replay can distinguish them
 * from plain removes.
 */
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

/* parameters of a MOD_LOG_MOVE_KEYS operation */
struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

/* location/level of the previous root for MOD_LOG_ROOT_REPLACE */
struct tree_mod_root {
	u64 logical;
	u8 level;
};

/*
 * One record in the per-fs tree modification log, kept in an rb tree
 * keyed by (index, seq) -- see __tree_mod_log_insert().
 */
struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

/*
 * Wrappers around the tree mod log rwlock.  Searches take the read side,
 * insertion and pruning take the write side.
 */
static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	u64 seq;

	/* write lock first, then the seq spinlock -- same order as elsewhere */
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	seq = btrfs_inc_tree_mod_seq(fs_info);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return seq;
}

/*
 * Drop the blocker @elem.  If no blocker with a lower sequence number
 * remains, prune every log entry whose seq is at or below the lowest
 * remaining blocker -- nobody can ask for those versions any more.
 */
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	/* elem was never registered (seq still zero): nothing to do */
	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		/* grab next before rb_erase invalidates node */
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}

/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 *
 * Takes ownership of @tm: it is freed here on the -EEXIST path.
 * NOTE(review): call sites reach this via tree_mod_dont_log(), which
 * returns with the tree_mod_log write lock held -- confirm before adding
 * new callers.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm || !tm->seq);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else {
			/* duplicate (index, seq): drop the new element */
			kfree(tm);
			return -EEXIST;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	/* pairs with updates of tree_mod_seq_list done without this lock */
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	/* leaves are never logged, only node-level changes are */
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		/*
		 * someone emptied the list while we were waiting for the lock.
		 * we must not add to the list when no blocker exists.
		 */
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}

/*
 * This allocates memory and gets a tree modification sequence number.
 *
 * Returns <0 on error.
 * Returns >0 (the added sequence number) on success.
 */
static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
				 struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;

	/*
	 * once we switch from spin locks to something different, we should
	 * honor the flags parameter here.
	 */
	tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
	if (!tm)
		return -ENOMEM;

	/*
	 * NOTE(review): tm->seq is u64 but is returned through an int, so the
	 * value truncates once the sequence counter exceeds INT_MAX.  Callers
	 * here only test for < 0 -- verify that is acceptable.
	 */
	tm->seq = btrfs_inc_tree_mod_seq(fs_info);
	return tm->seq;
}

/*
 * Record a single-key operation (@op on @slot of node @eb) in the mod log.
 * For everything but KEY_ADD the key/blockptr/generation being overwritten
 * or removed are captured so the old state can be reconstructed.
 * NOTE(review): callers hold the tree_mod_log write lock here (taken via
 * tree_mod_dont_log) -- confirm before adding new call sites.
 */
static inline int
__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot,
			  enum mod_log_op op, gfp_t flags)
{
	int ret;
	struct tree_mod_elem *tm;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	return __tree_mod_log_insert(fs_info, tm);
}

/*
 * Log a key operation with an explicit gfp mask.  Handles the
 * dont-log/write-lock protocol around __tree_mod_log_insert_key().
 */
static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	int ret;

	/* returns 1 without the lock when logging can be skipped */
	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);

	/* tree_mod_dont_log() left the write lock held on the 0 path */
	tree_mod_log_write_unlock(fs_info);
	return ret;
}

/* GFP_NOFS convenience wrapper around tree_mod_log_insert_key_mask() */
static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
			int slot, enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}

/*
 * Like tree_mod_log_insert_key(), but for callers that already hold the
 * tree_mod_log write lock (e.g. after a 0 return from tree_mod_dont_log).
 */
static noinline int
tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op)
{
	return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
}

/*
 * Log a move of @nr_items keys within node @eb from @src_slot to
 * @dst_slot.  The destination slots that get overwritten (those below
 * src_slot) are logged individually first so their old content survives.
 */
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	/* only the dst slots below src_slot lose their previous content */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
					      MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		goto out;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	ret = __tree_mod_log_insert(fs_info, tm);
out:
	/* drop the write lock taken by tree_mod_dont_log() */
	tree_mod_log_write_unlock(fs_info);
	return ret;
}

/*
 * Log the removal of every key in @eb (highest slot first) before the
 * buffer is freed.  Leaves are not logged.  The caller must hold the
 * tree_mod_log write lock.
 */
static inline void
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	int i;
	u32 nritems;
	int ret;

	if (btrfs_header_level(eb) == 0)
		return;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}

/*
 * Log the replacement of the tree root @old_root by @new_root.  The keys
 * of the old root are logged as removed-while-freeing first; the
 * ROOT_REPLACE element itself is keyed by the *new* root's logical.
 */
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (tree_mod_dont_log(fs_info, NULL))
		return 0;

	__tree_mod_log_free_eb(fs_info, old_root);

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		goto out;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	ret = __tree_mod_log_insert(fs_info, tm);
out:
	/* drop the write lock taken by tree_mod_dont_log() */
	tree_mod_log_write_unlock(fs_info);
	return ret;
}

/*
 * Search the mod log for elements with the index derived from @start and
 * seq >= @min_seq.  With @smallest set, return the one with the lowest
 * such seq; otherwise return the one with the highest seq.  Returns NULL
 * when nothing qualifies.
 */
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			/* exact match on min_seq: can't do better */
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

/*
 * Log the copy of @nr_items keys from node @src to node @dst: each copied
 * key is logged as a REMOVE from src and an ADD to dst.  Nothing is
 * logged when both buffers are leaves.
 */
static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, NULL))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
		/* leaf-to-leaf copies are not logged */
		tree_mod_log_write_unlock(fs_info);
		return;
	}

	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key_locked(fs_info, src,
						     i + src_offset,
						     MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key_locked(fs_info, dst,
						     i + dst_offset,
						     MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}

	/* drop the write lock taken by tree_mod_dont_log() */
	tree_mod_log_write_unlock(fs_info);
}

/* log an intra-node key move; only allocation failure can make it fail */
static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}

/*
 * log a key replacement in @slot of node @eb.  @atomic selects GFP_ATOMIC
 * for callers that cannot sleep.
 */
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb,
			  struct btrfs_disk_key *disk_key, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
					   MOD_LOG_KEY_REPLACE,
					   atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

/*
 * Log that node @eb is being freed, recording removal of all its keys.
 * Handles the dont-log/write-lock protocol around __tree_mod_log_free_eb().
 */
static noinline void
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	if (tree_mod_dont_log(fs_info, eb))
		return;

	__tree_mod_log_free_eb(fs_info, eb);

	/* drop the write lock taken by tree_mod_dont_log() */
	tree_mod_log_write_unlock(fs_info);
}

/* log the replacement of root->node by @new_root_node */
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	/* pre-mixed-backref blocks may be shared even without the above */
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

/*
 * Adjust extent backrefs when @buf is cow'ed into @cow.  On the path
 * where buf's last tree reference is dropped, *last_ref is set to 1 so
 * the caller knows to free the extent.  Returns 0 or a negative error.
 */
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is been relocating
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			/* a shareable block with zero refs is corruption */
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			/* convert buf to full backrefs */
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		/*
		 * don't log freeing in case we're freeing the root node, this
		 * is done by tree_mod_log_set_root_pointer later
		 */
		if (buf != root->node && btrfs_header_level(buf) != 0)
			tree_mod_log_free_eb(root->fs_info, buf);
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}

C
Chris Mason 已提交
941
/*
C
Chris Mason 已提交
942 943 944 945
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
C
Chris Mason 已提交
946 947 948
 *
 * search_start -- an allocation hint for the new block
 *
C
Chris Mason 已提交
949 950 951
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
C
Chris Mason 已提交
952
 */
C
Chris Mason 已提交
953
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
954 955 956 957
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
958
			     u64 search_start, u64 empty_size)
C
Chris Mason 已提交
959
{
960
	struct btrfs_disk_key disk_key;
961
	struct extent_buffer *cow;
962
	int level, ret;
963
	int last_ref = 0;
964
	int unlock_orig = 0;
965
	u64 parent_start;
966

967 968 969
	if (*cow_ret == buf)
		unlock_orig = 1;

970
	btrfs_assert_tree_locked(buf);
971

972 973
	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
974
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
975

976
	level = btrfs_header_level(buf);
Z
Zheng Yan 已提交
977

978 979 980 981 982 983 984 985 986 987 988 989 990 991 992
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
993
				     level, search_start, empty_size);
994 995
	if (IS_ERR(cow))
		return PTR_ERR(cow);
996

997 998
	/* cow is set to blocking by btrfs_init_new_buffer */

999
	copy_extent_buffer(cow, buf, 0, 0, cow->len);
1000
	btrfs_set_header_bytenr(cow, cow->start);
1001
	btrfs_set_header_generation(cow, trans->transid);
1002 1003 1004 1005 1006 1007 1008
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);
1009

Y
Yan Zheng 已提交
1010 1011 1012 1013
	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

1014
	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1015
	if (ret) {
1016
		btrfs_abort_transaction(trans, root, ret);
1017 1018
		return ret;
	}
Z
Zheng Yan 已提交
1019

1020 1021 1022
	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

C
Chris Mason 已提交
1023
	if (buf == root->node) {
1024
		WARN_ON(parent && parent != buf);
1025 1026 1027 1028 1029
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;
1030

1031
		extent_buffer_get(cow);
1032
		tree_mod_log_set_root_pointer(root, cow);
1033
		rcu_assign_pointer(root->node, cow);
1034

1035
		btrfs_free_tree_block(trans, root, buf, parent_start,
1036
				      last_ref);
1037
		free_extent_buffer(buf);
1038
		add_root_to_dirty_list(root);
C
Chris Mason 已提交
1039
	} else {
1040 1041 1042 1043 1044 1045
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
1046 1047
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
1048
		btrfs_set_node_blockptr(parent, parent_slot,
1049
					cow->start);
1050 1051
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
C
Chris Mason 已提交
1052
		btrfs_mark_buffer_dirty(parent);
1053
		btrfs_free_tree_block(trans, root, buf, parent_start,
1054
				      last_ref);
C
Chris Mason 已提交
1055
	}
1056 1057
	if (unlock_orig)
		btrfs_tree_unlock(buf);
1058
	free_extent_buffer_stale(buf);
C
Chris Mason 已提交
1059
	btrfs_mark_buffer_dirty(cow);
C
Chris Mason 已提交
1060
	*cow_ret = cow;
C
Chris Mason 已提交
1061 1062 1063
	return 0;
}

J
Jan Schmidt 已提交
1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = root->node->start;
	int looped = 0;

	if (!time_seq)
		return 0;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return 0;
		/*
1091 1092 1093
		 * if there are no tree operation for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
J
Jan Schmidt 已提交
1094
		 */
1095 1096
		if (!tm)
			break;
J
Jan Schmidt 已提交
1097

1098 1099 1100 1101 1102
		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
J
Jan Schmidt 已提交
1103 1104 1105 1106 1107 1108 1109 1110 1111
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		BUG_ON(root_logical == root->node->start);
		looped = 1;
	}

1112 1113 1114 1115
	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

J
Jan Schmidt 已提交
1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135
	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewinded (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
		      struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* fall through: undo the remove like the cases below */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	btrfs_set_header_nritems(eb, n);
}

/*
 * Returns a tree block whose contents are rewound to @time_seq. If no
 * rewinding is necessary, @eb itself is returned; otherwise @eb is released
 * and a fresh (dummy or cloned) buffer carrying the rewound state is returned.
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		    u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		BUG_ON(!eb_rewin);
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		BUG_ON(!eb_rewin);
	}

	extent_buffer_get(eb_rewin);
	free_extent_buffer(eb);

	__tree_mod_log_rewind(eb_rewin, time_seq, tm);

	return eb_rewin;
}

1232 1233 1234 1235 1236 1237 1238
/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
J
Jan Schmidt 已提交
1239 1240 1241 1242 1243
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb;
1244
	struct tree_mod_root *old_root = NULL;
1245
	u64 old_generation = 0;
1246
	u64 logical;
J
Jan Schmidt 已提交
1247

1248
	eb = btrfs_read_lock_root_node(root);
J
Jan Schmidt 已提交
1249 1250 1251 1252
	tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
	if (!tm)
		return root->node;

1253 1254 1255 1256 1257 1258 1259
	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = root->node->start;
	}
J
Jan Schmidt 已提交
1260

1261 1262
	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root)
1263
		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1264 1265
	else
		eb = btrfs_clone_extent_buffer(root->node);
1266 1267 1268 1269 1270
	btrfs_tree_read_unlock(root->node);
	free_extent_buffer(root->node);
	if (!eb)
		return NULL;
	btrfs_tree_read_lock(eb);
1271
	if (old_root) {
J
Jan Schmidt 已提交
1272 1273 1274
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, root->root_key.objectid);
1275 1276
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
J
Jan Schmidt 已提交
1277
	}
1278 1279 1280 1281
	if (tm)
		__tree_mod_log_rewind(eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
1282
	extent_buffer_get(eb);
J
Jan Schmidt 已提交
1283 1284 1285 1286

	return eb;
}

1287 1288 1289 1290
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during commiting the transaction,
	 *    after we've finished coping src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
1305 1306 1307
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1308 1309
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
1310 1311 1312 1313
		return 0;
	return 1;
}

C
Chris Mason 已提交
1314 1315 1316 1317 1318
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
C
Chris Mason 已提交
1319
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1320 1321
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
1322
		    struct extent_buffer **cow_ret)
1323 1324
{
	u64 search_start;
1325
	int ret;
C
Chris Mason 已提交
1326

1327
	if (trans->transaction != root->fs_info->running_transaction) {
C
Chris Mason 已提交
1328 1329 1330
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
1331 1332 1333 1334
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
C
Chris Mason 已提交
1335 1336 1337
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
1338 1339
		WARN_ON(1);
	}
C
Chris Mason 已提交
1340

1341
	if (!should_cow_block(trans, root, buf)) {
1342 1343 1344
		*cow_ret = buf;
		return 0;
	}
1345

1346
	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1347 1348 1349 1350 1351

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

1352
	ret = __btrfs_cow_block(trans, root, buf, parent,
1353
				 parent_slot, cow_ret, search_start, 0);
1354 1355 1356

	trace_btrfs_cow_block(root, buf, *cow_ret);

1357
	return ret;
1358 1359
}

C
Chris Mason 已提交
1360 1361 1362 1363
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
1364
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1365
{
1366
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
1367
		return 1;
1368
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
1369 1370 1371 1372
		return 1;
	return 0;
}

1373 1374 1375 1376 1377 1378 1379 1380 1381
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

1382
	return btrfs_comp_cpu_keys(&k1, k2);
1383 1384
}

1385 1386 1387
/*
 * same as comp_keys only with two btrfs_key's
 */
1388
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
1404

C
Chris Mason 已提交
1405 1406 1407 1408 1409
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
1410
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1411
		       struct btrfs_root *root, struct extent_buffer *parent,
1412 1413
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
1414
{
1415
	struct extent_buffer *cur;
1416
	u64 blocknr;
1417
	u64 gen;
1418 1419
	u64 search_start = *last_ret;
	u64 last_block = 0;
1420 1421 1422 1423 1424
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
1425
	int parent_level;
1426 1427
	int uptodate;
	u32 blocksize;
1428 1429
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;
1430

1431 1432 1433 1434
	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

C
Chris Mason 已提交
1435
	if (trans->transaction != root->fs_info->running_transaction)
1436
		WARN_ON(1);
C
Chris Mason 已提交
1437
	if (trans->transid != root->fs_info->generation)
1438
		WARN_ON(1);
1439

1440 1441
	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
1442 1443 1444 1445 1446
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

1447 1448
	btrfs_set_lock_blocking(parent);

1449 1450
	for (i = start_slot; i < end_slot; i++) {
		int close = 1;
1451

1452 1453 1454 1455 1456
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
1457
		blocknr = btrfs_node_blockptr(parent, i);
1458
		gen = btrfs_node_ptr_generation(parent, i);
1459 1460
		if (last_block == 0)
			last_block = blocknr;
1461

1462
		if (i > 0) {
1463 1464
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
1465
		}
C
Chris Mason 已提交
1466
		if (!close && i < end_slot - 2) {
1467 1468
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
1469
		}
1470 1471
		if (close) {
			last_block = blocknr;
1472
			continue;
1473
		}
1474

1475 1476
		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
1477
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1478 1479
		else
			uptodate = 0;
1480
		if (!cur || !uptodate) {
1481
			if (cache_only) {
1482
				free_extent_buffer(cur);
1483 1484
				continue;
			}
1485 1486
			if (!cur) {
				cur = read_tree_block(root, blocknr,
1487
							 blocksize, gen);
1488 1489
				if (!cur)
					return -EIO;
1490
			} else if (!uptodate) {
1491 1492 1493 1494 1495
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
1496
			}
1497
		}
1498
		if (search_start == 0)
1499
			search_start = last_block;
1500

1501
		btrfs_tree_lock(cur);
1502
		btrfs_set_lock_blocking(cur);
1503
		err = __btrfs_cow_block(trans, root, cur, parent, i,
1504
					&cur, search_start,
1505
					min(16 * blocksize,
1506
					    (end_slot - i) * blocksize));
Y
Yan 已提交
1507
		if (err) {
1508
			btrfs_tree_unlock(cur);
1509
			free_extent_buffer(cur);
1510
			break;
Y
Yan 已提交
1511
		}
1512 1513
		search_start = cur->start;
		last_block = cur->start;
1514
		*last_ret = search_start;
1515 1516
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
1517 1518 1519 1520
	}
	return err;
}

C
Chris Mason 已提交
1521 1522 1523 1524 1525
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
C
Chris Mason 已提交
1526
static inline unsigned int leaf_data_end(struct btrfs_root *root,
1527
					 struct extent_buffer *leaf)
1528
{
1529
	u32 nr = btrfs_header_nritems(leaf);
1530
	if (nr == 0)
C
Chris Mason 已提交
1531
		return BTRFS_LEAF_DATA_SIZE(root);
1532
	return btrfs_item_offset_nr(leaf, nr - 1);
1533 1534
}

C
Chris Mason 已提交
1535

C
Chris Mason 已提交
1536
/*
1537 1538 1539
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
C
Chris Mason 已提交
1540 1541 1542 1543 1544 1545
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
1546 1547 1548 1549
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
1550 1551 1552 1553 1554
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
1555
	struct btrfs_disk_key *tmp = NULL;
1556 1557 1558 1559 1560
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
1561
	int err;
1562

C
Chris Mason 已提交
1563
	while (low < high) {
1564
		mid = (low + high) / 2;
1565 1566
		offset = p + mid * item_size;

1567
		if (!kaddr || offset < map_start ||
1568 1569
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {
1570 1571

			err = map_private_extent_buffer(eb, offset,
1572
						sizeof(struct btrfs_disk_key),
1573
						&kaddr, &map_start, &map_len);
1574 1575 1576 1577 1578 1579 1580 1581 1582

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}
1583 1584 1585 1586 1587

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

C
Chris Mason 已提交
1603 1604 1605 1606
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
1607 1608
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
1609
{
1610
	if (level == 0)
1611 1612
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
C
Chris Mason 已提交
1613
					  sizeof(struct btrfs_item),
1614
					  key, btrfs_header_nritems(eb),
1615
					  slot);
1616
	else
1617 1618
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
C
Chris Mason 已提交
1619
					  sizeof(struct btrfs_key_ptr),
1620
					  key, btrfs_header_nritems(eb),
1621
					  slot);
1622 1623
}

/* public wrapper around the static bin_search helper */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

/* account @size fewer bytes as used by @root (accounting_lock protects it) */
static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

C
Chris Mason 已提交
1646 1647 1648 1649
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
1650
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1651
				   struct extent_buffer *parent, int slot)
1652
{
1653
	int level = btrfs_header_level(parent);
1654 1655
	if (slot < 0)
		return NULL;
1656
	if (slot >= btrfs_header_nritems(parent))
1657
		return NULL;
1658 1659 1660

	BUG_ON(level == 0);

1661
	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
1662 1663
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
1664 1665
}

C
Chris Mason 已提交
1666 1667 1668 1669 1670
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave an node completely empty later on.
 */
1671
static noinline int balance_level(struct btrfs_trans_handle *trans,
1672 1673
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
1674
{
1675 1676 1677 1678
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
1679 1680 1681 1682
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
1683
	u64 orig_ptr;
1684 1685 1686 1687

	if (level == 0)
		return 0;

1688
	mid = path->nodes[level];
1689

1690 1691
	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1692 1693
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

1694
	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1695

L
Li Zefan 已提交
1696
	if (level < BTRFS_MAX_LEVEL - 1) {
1697
		parent = path->nodes[level + 1];
L
Li Zefan 已提交
1698 1699
		pslot = path->slots[level + 1];
	}
1700

C
Chris Mason 已提交
1701 1702 1703 1704
	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
1705 1706
	if (!parent) {
		struct extent_buffer *child;
1707

1708
		if (btrfs_header_nritems(mid) != 1)
1709 1710 1711
			return 0;

		/* promote the child to a root */
1712
		child = read_node_slot(root, mid, 0);
1713 1714 1715 1716 1717 1718
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

1719
		btrfs_tree_lock(child);
1720
		btrfs_set_lock_blocking(child);
1721
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1722 1723 1724 1725 1726
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}
1727

1728
		tree_mod_log_set_root_pointer(root, child);
1729
		rcu_assign_pointer(root->node, child);
1730

1731
		add_root_to_dirty_list(root);
1732
		btrfs_tree_unlock(child);
1733

1734
		path->locks[level] = 0;
1735
		path->nodes[level] = NULL;
1736
		clean_tree_block(trans, root, mid);
1737
		btrfs_tree_unlock(mid);
1738
		/* once for the path */
1739
		free_extent_buffer(mid);
1740 1741

		root_sub_used(root, mid->len);
1742
		btrfs_free_tree_block(trans, root, mid, 0, 1);
1743
		/* once for the root ptr */
1744
		free_extent_buffer_stale(mid);
1745
		return 0;
1746
	}
1747
	if (btrfs_header_nritems(mid) >
C
Chris Mason 已提交
1748
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1749 1750
		return 0;

1751 1752
	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
1753
		btrfs_tree_lock(left);
1754
		btrfs_set_lock_blocking(left);
1755
		wret = btrfs_cow_block(trans, root, left,
1756
				       parent, pslot - 1, &left);
1757 1758 1759 1760
		if (wret) {
			ret = wret;
			goto enospc;
		}
1761
	}
1762 1763
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
1764
		btrfs_tree_lock(right);
1765
		btrfs_set_lock_blocking(right);
1766
		wret = btrfs_cow_block(trans, root, right,
1767
				       parent, pslot + 1, &right);
1768 1769 1770 1771 1772 1773 1774
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
1775 1776
	if (left) {
		orig_slot += btrfs_header_nritems(left);
1777
		wret = push_node_left(trans, root, left, mid, 1);
1778 1779
		if (wret < 0)
			ret = wret;
1780
	}
1781 1782 1783 1784

	/*
	 * then try to empty the right most buffer into the middle
	 */
1785
	if (right) {
1786
		wret = push_node_left(trans, root, mid, right, 1);
1787
		if (wret < 0 && wret != -ENOSPC)
1788
			ret = wret;
1789 1790
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
1791
			btrfs_tree_unlock(right);
1792
			del_ptr(trans, root, path, level + 1, pslot + 1, 1);
1793
			root_sub_used(root, right->len);
1794
			btrfs_free_tree_block(trans, root, right, 0, 1);
1795
			free_extent_buffer_stale(right);
1796
			right = NULL;
1797
		} else {
1798 1799
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
1800 1801
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &right_key, pslot + 1, 0);
1802 1803
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
1804 1805
		}
	}
1806
	if (btrfs_header_nritems(mid) == 1) {
1807 1808 1809 1810 1811 1812 1813 1814 1815
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
1816 1817 1818 1819 1820
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
1821
		wret = balance_node_right(trans, root, mid, left);
1822
		if (wret < 0) {
1823
			ret = wret;
1824 1825
			goto enospc;
		}
1826 1827 1828 1829 1830
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
1831 1832
		BUG_ON(wret == 1);
	}
1833 1834
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
1835
		btrfs_tree_unlock(mid);
1836
		del_ptr(trans, root, path, level + 1, pslot, 1);
1837
		root_sub_used(root, mid->len);
1838
		btrfs_free_tree_block(trans, root, mid, 0, 1);
1839
		free_extent_buffer_stale(mid);
1840
		mid = NULL;
1841 1842
	} else {
		/* update the parent key to reflect our changes */
1843 1844
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
1845 1846
		tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
					  pslot, 0);
1847 1848
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
1849
	}
1850

1851
	/* update the path */
1852 1853 1854
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
1855
			/* left was locked after cow */
1856
			path->nodes[level] = left;
1857 1858
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
1859 1860
			if (mid) {
				btrfs_tree_unlock(mid);
1861
				free_extent_buffer(mid);
1862
			}
1863
		} else {
1864
			orig_slot -= btrfs_header_nritems(left);
1865 1866 1867
			path->slots[level] = orig_slot;
		}
	}
1868
	/* double check we haven't messed things up */
C
Chris Mason 已提交
1869
	if (orig_ptr !=
1870
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1871
		BUG();
1872
enospc:
1873 1874
	if (right) {
		btrfs_tree_unlock(right);
1875
		free_extent_buffer(right);
1876 1877 1878 1879
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
1880
		free_extent_buffer(left);
1881
	}
1882 1883 1884
	return ret;
}

/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 *
 * Tries to push pointers out of the node at path->nodes[level], first into
 * the left sibling, then into the right sibling.  On a successful push the
 * parent key for the shifted node is updated (with tree-mod logging) and
 * the path is re-pointed at whichever node now holds orig_slot.
 *
 * Returns 0 if some room was made and the path was updated, 1 if nothing
 * could be pushed (no parent, siblings full, or level 0).
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	/* leaves are handled by split_leaf, not here */
	if (level == 0)
		return 1;

	mid = path->nodes[level];
	/* the caller must already have COWed this block in this transaction */
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/* no parent means mid is the root; nothing to push into */
	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* left sibling is (nearly) full, don't bother */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			/* items moved left; mid's first key changed */
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				/* the slot we care about moved into left */
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				/* slot stays in mid, adjust for the shift */
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			/* right sibling is (nearly) full, don't bother */
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			/* items moved right; right's first key changed */
			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				/* the slot we care about moved into right */
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 *
 * Only level 1 nodes are considered (their children are leaves).  The
 * direction of the scan comes from path->reada (< 0 scans toward lower
 * slots, > 0 toward higher slots).  Readahead is limited to blocks within
 * 64k of the target block pointer, and stops after 64k has been queued
 * or 32 slots have been examined.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		/* target block is already cached, no readahead needed */
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			/* backwards scans stop once we leave the objectid */
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		/* only readahead blocks within 64k of the target */
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search, blocksize, gen);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
2090

/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 *
 * Before balancing at 'level' we will need the left and right siblings of
 * path->nodes[level].  If either is missing from cache (or fails the
 * generation check), release the whole path, issue readahead and then a
 * blocking read for both, and tell the caller to restart the search.
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		/*
		 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here.  That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		/* wait for the reads to finish before the caller retries */
		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}


/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 *
 * When write_lock_level is non-NULL it is lowered as write locks above
 * min_write_lock_level are released, so later passes know those levels are
 * no longer write locked.
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;	/* levels <= skip_level keep their locks */
	int no_skips = 0;	/* once set, stop raising skip_level */
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			/* slot 0: a key change here propagates upward */
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				/* last slot: keep the lock per keep_locks */
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}

/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	/* keep_locks means the caller wants every lock preserved */
	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}

/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 *
 * Returns 0 with *eb_ret set on success, -EAGAIN after dropping the path,
 * or -EIO if the block can never be read up to date.
 *
 * NOTE(review): time_seq is accepted here but not consulted in this body;
 * it appears to be threaded through for the old-slot search path — confirm.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key, u64 time_seq)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
			if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				/*
				 * we found an up to date block without
				 * sleeping, return
				 * right away
				 */
				*eb_ret = tmp;
				return 0;
			}
			/* the pages were up to date, but we failed
			 * the generation number check.  Do a full
			 * read for the generation number that is correct.
			 * We must do this without dropping locks so
			 * we can trust our generation number
			 */
			free_extent_buffer(tmp);
			btrfs_set_path_blocking(p);

			/* now we're allowed to do a blocking uptodate check */
			tmp = read_tree_block(root, blocknr, blocksize, gen);
			if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
				*eb_ret = tmp;
				return 0;
			}
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}

/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 *
 * ins_len > 0 (or search_for_split) splits nearly-full nodes; ins_len < 0
 * merges nearly-empty nodes.  *write_lock_level is raised (forcing a
 * restart) when the rebalance needs write locks this pass doesn't hold.
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	int ret;
	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		/* splitting needs a write lock on this node and its parent */
		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		int sret;

		/* merging needs a write lock on this node and its parent */
		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			/* the node was freed during the balance, restart */
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}

/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 *
 * Locking: the walk starts with read locks wherever possible and only
 * trades up to write locks (restarting via 'again') when COW, key updates
 * at slot 0, or rebalancing require them.  write_lock_level tracks the
 * highest level that must be write locked on the current pass.
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	int root_lock;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers  Make sure we keep write
		 * for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

again:
	/*
	 * we try very hard to do read locks on the root
	 */
	root_lock = BTRFS_READ_LOCK;
	level = 0;
	if (p->search_commit_root) {
		/*
		 * the commit roots are read only
		 * so we always do read locks
		 */
		b = root->commit_root;
		extent_buffer_get(b);
		level = btrfs_header_level(b);
		if (!p->skip_locking)
			btrfs_tree_read_lock(b);
	} else {
		if (p->skip_locking) {
			b = btrfs_root_node(root);
			level = btrfs_header_level(b);
		} else {
			/* we don't know the level of the root node
			 * until we actually have it read locked
			 */
			b = btrfs_read_lock_root_node(root);
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				/* whoops, must trade for write lock */
				btrfs_tree_read_unlock(b);
				free_extent_buffer(b);
				b = btrfs_lock_root_node(root);
				root_lock = BTRFS_WRITE_LOCK;

				/* the level might have changed, check again */
				level = btrfs_header_level(b);
			}
		}
	}
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			btrfs_set_path_blocking(p);

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level + 1 > write_lock_level) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);

		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way to for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				/* no exact match; descend via the prior slot */
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
					     ins_len, &write_lock_level);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			/*
			 * slot 0 is special, if we change the key
			 * we have to update the parent pointer
			 * which means we must have a write lock
			 * on the parent
			 */
			if (slot == 0 && cow &&
			    write_lock_level < level + 1) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			unlock_up(p, level, lowest_unlock,
				  min_write_lock_level, &write_lock_level);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key, 0);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				level = btrfs_header_level(b);
				if (level <= write_lock_level) {
					/* try non-blocking first, fall back
					 * to a blocking lock */
					err = btrfs_try_tree_write_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_WRITE_LOCK);
					}
					p->locks[level] = BTRFS_WRITE_LOCK;
				} else {
					err = btrfs_try_tree_read_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_read_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_READ_LOCK);
					}
					p->locks[level] = BTRFS_READ_LOCK;
				}
				p->nodes[level] = b;
			}
		} else {
			/* level 0: we reached the leaf */
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL, 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level, &write_lock_level);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);
	return ret;
}

/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(p->nodes[0] != NULL);

	if (p->search_commit_root) {
		/* the commit root never changes, no rewind needed */
		BUG_ON(time_seq);
		return btrfs_search_slot(NULL, root, key, p, 0, 0);
	}

again:
	b = get_old_root(root, time_seq);
	level = btrfs_header_level(b);
	p->locks[level] = BTRFS_READ_LOCK;

	while (b) {
		level = btrfs_header_level(b);
		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way to for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 */
		btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				/* no exact match; descend via the prior slot */
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(NULL, root, p, &b, level,
						    slot, key, time_seq);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			level = btrfs_header_level(b);
			err = btrfs_try_tree_read_lock(b);
			if (!err) {
				btrfs_set_path_blocking(p);
				btrfs_tree_read_lock(b);
				btrfs_clear_path_blocking(p, b,
							  BTRFS_READ_LOCK);
			}
			p->locks[level] = BTRFS_READ_LOCK;
			p->nodes[level] = b;
			/* rewind the block to the state it had at time_seq */
			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
			if (b != p->nodes[level]) {
				btrfs_tree_unlock_rw(p->nodes[level],
						     p->locks[level]);
				p->locks[level] = 0;
				p->nodes[level] = b;
			}
		} else {
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);
			goto done;
		}
	}
	ret = 1;
done:
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);

	return ret;
}

/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is found,
 * return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       struct btrfs_key *key, struct btrfs_path *p,
			       int find_higher, int return_any)
{
	int ret;
	struct extent_buffer *leaf;

again:
	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
	if (ret <= 0)
		return ret;
	/*
	 * a return value of 1 means the path is at the position where the
	 * item should be inserted. Normally this is the next bigger item,
	 * but in case the previous item is the last in a leaf, path points
	 * to the first free slot in the previous leaf, i.e. at an invalid
	 * item.
	 */
	leaf = p->nodes[0];

	if (find_higher) {
		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
			/* path is past the last item of this leaf */
			ret = btrfs_next_leaf(root, p);
			if (ret <= 0)
				return ret;
			if (!return_any)
				return 1;
			/*
			 * no higher item found, return the next
			 * lower instead
			 */
			return_any = 0;
			find_higher = 0;
			btrfs_release_path(p);
			goto again;
		}
	} else {
		if (p->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, p);
			if (ret < 0)
				return ret;
			if (!ret) {
				p->slots[0] = btrfs_header_nritems(leaf) - 1;
				return 0;
			}
			if (!return_any)
				return 1;
			/*
			 * no lower item found, return the next
			 * higher instead
			 */
			return_any = 0;
			find_higher = 1;
			btrfs_release_path(p);
			goto again;
		} else {
			--p->slots[0];
		}
	}
	return 0;
}

/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node is points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * Each key update is recorded in the tree-mod log before the node is
 * modified and marked dirty.
 */
static void fixup_low_keys(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		/* only slot 0 keys are duplicated in the parent */
		if (tslot != 0)
			break;
	}
}

/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 *
 * The BUG_ONs below only verify ordering against the immediate neighbours
 * in this leaf.  If the item ends up in slot 0, the parent key pointers
 * are fixed up as well.
 */
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		/* previous key must stay strictly smaller */
		btrfs_item_key(eb, &disk_key, slot - 1);
		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		/* next key must stay strictly larger */
		btrfs_item_key(eb, &disk_key, slot + 1);
		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(trans, root, path, &disk_key, 1);
}

C
Chris Mason 已提交
2922 2923
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 *
 * When 'empty' is set the caller wants src drained completely if possible;
 * otherwise at least 8 pointers are left behind in src.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	/* both blocks must already be COWed in this transaction */
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	/* log the cross-buffer copy before mutating dst */
	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
			     push_items);
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		/* close the gap left at the head of src */
		tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
				     src_nritems - push_items);
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}

/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will  only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	/* both blocks must already be COWed in this transaction */
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	/* too few pointers to be worth rebalancing */
	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	/* shift dst's existing pointers right to make room at slot 0 */
	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
				      btrfs_node_key_ptr_offset(0),
				      (dst_nritems) *
				      sizeof(struct btrfs_key_ptr));

	/* copy the tail of src into the head of dst */
	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
			     src_nritems - push_items, push_items);
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}

C
Chris Mason 已提交
3050 3051 3052 3053
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	/* the new root's single key is the smallest key of the old root */
	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, &lower_key,
				   level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, root->nodesize);

	/* initialize the fresh header from scratch */
	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	/* the single pointer references the old root */
	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	/* publish the new root; readers may be walking the old one */
	old = root->node;
	tree_mod_log_set_root_pointer(root, c);
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	/* path takes its own reference on the new root */
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK;
	path->slots[level] = 0;
	return 0;
}

C
Chris Mason 已提交
3124 3125 3126
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	/* caller guarantees room; full node here is a logic error */
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != nritems) {
		/* shift later pointers right to open up 'slot' */
		if (level)
			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
					     slot, nritems - slot);
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	if (level) {
		/* internal nodes only: record the insertion in the mod log */
		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}

C
Chris Mason 已提交
3168 3169 3170 3171 3172 3173
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/* trying to split the root, lets make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		/* pushing to siblings may avoid the split entirely */
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
					root->root_key.objectid,
					&disk_key, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	/* move the upper half of c's pointers into the new block */
	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	/* link the new block into the parent */
	insert_ptr(trans, root, path, &disk_key, split->start,
		   path->slots[level + 1] + 1, level + 1);

	/* keep the path pointing at the half that holds our slot;
	 * drop the lock and ref on the other half
	 */
	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}

C
Chris Mason 已提交
3258 3259 3260 3261 3262
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
3263
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3264 3265
{
	int data_len;
3266
	int nritems = btrfs_header_nritems(l);
3267
	int end = min(nritems, start + nr) - 1;
3268 3269 3270

	if (!nr)
		return 0;
3271 3272
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
C
Chris Mason 已提交
3273
	data_len += sizeof(struct btrfs_item) * nr;
3274
	WARN_ON(data_len < 0);
3275 3276 3277
	return data_len;
}

3278 3279 3280 3281 3282
/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
C
Chris Mason 已提交
3283
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3284
				   struct extent_buffer *leaf)
3285
{
3286 3287 3288 3289
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
C
Chris Mason 已提交
3290 3291
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
J
Jens Axboe 已提交
3292
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3293 3294 3295
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
3296 3297
}

3298 3299 3300 3301
/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 *
 * Caller holds locks on 'right' and on path->nodes[1]; on success the
 * path may be switched to point into 'right'.  Returns 0 if items were
 * pushed, 1 if nothing could be moved.
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_map_token token;
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	btrfs_init_map_token(&token);

	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	/* reserve room for the pending insert if it lands past the end */
	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	/* walk backwards from the last item, counting what fits */
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}

	if (push_items == 0)
		goto out_unlock;

	if (!empty && push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	/* total data bytes being moved */
	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	/* shift right's item headers to make room at the front */
	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		push_space -= btrfs_token_item_size(right, item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		clean_tree_block(trans, root, left);

	btrfs_mark_buffer_dirty(right);

	/* right's smallest key changed; fix the parent separator */
	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
3441

3442 3443 3444 3445 3446 3447
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	/* no parent means no right sibling to push into */
	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	/* read failure is treated as "no room", not a hard error */
	if (right == NULL)
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	/* __push_leaf_right takes ownership of the lock/ref on 'right' */
	return __push_leaf_right(trans, root, path, min_data_size, empty,
				right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}

C
Chris Mason 已提交
3508 3509 3510
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 *
 * Caller holds the lock and a ref on 'left'; this function drops them or
 * hands them to the path as appropriate.
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	u32 this_item_size;
	u32 old_left_item_size;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (empty)
		nr = min(right_nritems, max_slot);
	else
		nr = min(right_nritems - 1, max_slot);

	/* walk forward from slot 0, counting what fits in 'left' */
	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		/* reserve room for the pending insert at our slot */
		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	/* total data bytes being moved */
	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	/* rebase the copied items' data offsets into left's data area */
	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);

		ioff = btrfs_token_item_offset(left, item, &token);
		btrfs_set_token_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
		      &token);
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);

	/* fixup right node */
	if (push_items > right_nritems) {
		printk(KERN_CRIT "push items %d nr %u\n", push_items,
		       right_nritems);
		WARN_ON(1);
	}

	if (push_items < right_nritems) {
		/* slide right's remaining data to the end of its data area */
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			     (btrfs_header_nritems(right) - push_items) *
			     sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		push_space = push_space - btrfs_token_item_size(right,
								item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);
	else
		clean_tree_block(trans, root, right);

	/* right's smallest key changed; propagate it up the tree */
	btrfs_item_key(right, &disk_key, 0);
	fixup_low_keys(trans, root, path, &disk_key, 1);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}

3664 3665 3666
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	/* slot 0 in the parent means there is no left sibling */
	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	/* read failure is treated as "no room", not a hard error */
	if (left == NULL)
		return 1;

	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		if (ret == -ENOSPC)
			ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* __push_leaf_left takes ownership of the lock/ref on 'left' */
	return __push_leaf_left(trans, root, path, min_data_size,
			       empty, left, free_space, right_nritems,
			       max_slot);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}

/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * Copies items [mid, nritems) of leaf 'l' into the freshly allocated leaf
 * 'right', links 'right' into the parent, and fixes the path to point at
 * whichever half contains 'slot'.
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *l,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	struct btrfs_disk_key disk_key;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	/* number of items moving to the new right leaf */
	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	/* rebase the moved items' data offsets into right's data area */
	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		ioff = btrfs_token_item_offset(right, item, &token);
		btrfs_set_token_item_offset(right, item,
					    ioff + rt_data_off, &token);
	}

	btrfs_set_header_nritems(l, mid);
	/* link the new leaf into the parent after the original */
	btrfs_item_key(right, &disk_key, 0);
	insert_ptr(trans, root, path, &disk_key, right->start,
		   path->slots[1] + 1, 1);

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	/* keep the path on the half that holds our slot; drop the other */
	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);
}

3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *          A                 B                 C
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
{
	int err;
	int moved = 0;
	int slot = path->slots[0];
	u32 nritems;

	/* first, shove everything at or after our slot into the right leaf */
	err = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
	if (err < 0)
		return err;
	if (!err)
		moved++;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/* if our slot now sits at either edge of the leaf, we are done */
	if (path->slots[0] == 0 || path->slots[0] == nritems)
		return 0;

	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;

	/* otherwise, try pushing everything before our slot to the left */
	slot = path->slots[0];
	err = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
	if (err < 0)
		return err;
	if (!err)
		moved++;

	/* 0 if either push moved items, 1 if neither did */
	return moved ? 0 : 1;
}

C
Chris Mason 已提交
3858 3859 3860
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int split;
	int num_doubles = 0;
	int tried_avoid_double = 0;

	l = path->nodes[0];
	slot = path->slots[0];
	/* an item extended by data_size must still fit in an empty leaf */
	if (extend && data_size + btrfs_item_size_nr(l, slot) +
	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
		return -EOVERFLOW;

	/* first try to make some room by pushing left and right */
	if (data_size) {
		wret = push_leaf_right(trans, root, path, data_size,
				       data_size, 0, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size,
					      data_size, 0, (u32)-1);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	/* splitting the only leaf needs a new root above it first */
	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	/*
	 * split == 1: move items [mid, nritems) into the new leaf.
	 * split == 0: the target slot sits at a leaf edge, so an empty
	 *             new leaf is enough and nothing is copied.
	 * split == 2: one split is not enough; split again afterwards
	 *             (the "double split" case).
	 */
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					/* try pushing into the neighbours
					 * before resorting to a double split */
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	}

	/*
	 * first key of the new leaf: the insertion key when nothing is
	 * copied over, otherwise the key of the first copied item
	 */
	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
					root->root_key.objectid,
					&disk_key, 0, l->start, 0);
	if (IS_ERR(right))
		return PTR_ERR(right);

	root_add_used(root, root->leafsize);

	/* initialize the header of the new (right) leaf */
	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (split == 0) {
		/*
		 * nothing to copy: link the empty leaf into the parent and
		 * point the path at it, on whichever side the slot is on
		 */
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
				   path->slots[1] + 1, 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
					  path->slots[1], 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			/* new leftmost leaf: fix the keys above us */
			if (path->slots[1] == 0)
				fixup_low_keys(trans, root, path,
					       &disk_key, 1);
		}
		btrfs_mark_buffer_dirty(right);
		return ret;
	}

	copy_for_split(trans, root, path, l, right, slot, mid, nritems);

	if (split == 2) {
		/* only one extra split is ever needed */
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return 0;

push_for_double:
	/* best effort: even if the pushes fail we fall back to splitting */
	push_for_double_split(trans, root, path, data_size);
	tried_avoid_double = 1;
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;
	goto again;
}

Y
Yan, Zheng 已提交
4027 4028 4029
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
4030
{
Y
Yan, Zheng 已提交
4031
	struct btrfs_key key;
4032
	struct extent_buffer *leaf;
Y
Yan, Zheng 已提交
4033 4034 4035 4036
	struct btrfs_file_extent_item *fi;
	u64 extent_len = 0;
	u32 item_size;
	int ret;
4037 4038

	leaf = path->nodes[0];
Y
Yan, Zheng 已提交
4039 4040 4041 4042 4043 4044 4045
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
	       key.type != BTRFS_EXTENT_CSUM_KEY);

	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
		return 0;
4046 4047

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
Y
Yan, Zheng 已提交
4048 4049 4050 4051 4052
	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
	}
4053
	btrfs_release_path(path);
4054 4055

	path->keep_locks = 1;
Y
Yan, Zheng 已提交
4056 4057
	path->search_for_split = 1;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4058
	path->search_for_split = 0;
Y
Yan, Zheng 已提交
4059 4060
	if (ret < 0)
		goto err;
4061

Y
Yan, Zheng 已提交
4062 4063
	ret = -EAGAIN;
	leaf = path->nodes[0];
4064
	/* if our item isn't there or got smaller, return now */
Y
Yan, Zheng 已提交
4065 4066 4067
	if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
		goto err;

4068 4069 4070 4071
	/* the leaf has  changed, it now has room.  return now */
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
		goto err;

Y
Yan, Zheng 已提交
4072 4073 4074 4075 4076
	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
			goto err;
4077 4078
	}

4079
	btrfs_set_path_blocking(path);
Y
Yan, Zheng 已提交
4080
	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4081 4082
	if (ret)
		goto err;
4083

Y
Yan, Zheng 已提交
4084
	path->keep_locks = 0;
4085
	btrfs_unlock_up_safe(path, 1);
Y
Yan, Zheng 已提交
4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107
	return 0;
err:
	path->keep_locks = 0;
	return ret;
}

/*
 * split the item at path->slots[0] in two at 'split_offset' bytes from its
 * start.  The tail of the data becomes a new item with key 'new_key' at the
 * following slot; the original item keeps its key and the first
 * split_offset bytes.  The leaf must already have room for one more
 * struct btrfs_item header (enforced by the BUG_ON below).
 *
 * Returns 0 on success or -ENOMEM if the bounce buffer allocation fails.
 */
static noinline int split_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *new_key,
			       unsigned long split_offset)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int slot;
	char *buf;
	u32 nritems;
	u32 item_size;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(leaf, path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	/* stash the item's data while the item headers are rearranged */
	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			    path->slots[0]), item_size);

	/* make room for the new item header right after the original */
	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
				btrfs_item_nr_offset(slot),
				(nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(leaf, slot);

	/*
	 * the new item takes the tail of the data in place; the original
	 * item's offset moves up so the two halves stay contiguous
	 */
	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
	kfree(buf);
	return 0;
}

/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int err;

	/* make sure the leaf can take one more item header */
	err = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (err)
		return err;

	return split_item(trans, root, path, new_key, split_offset);
}

Y
Yan, Zheng 已提交
4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
{
	struct extent_buffer *eb = path->nodes[0];
	u32 size = btrfs_item_size_nr(eb, path->slots[0]);
	int err;

	/* make room for a second copy of the data plus its item header */
	err = setup_leaf_for_split(trans, root, path,
				   size + sizeof(struct btrfs_item));
	if (err)
		return err;

	/* insert an empty item with 'new_key' right after the original */
	path->slots[0]++;
	setup_items_for_insert(trans, root, path, new_key, &size,
			       size, size + sizeof(struct btrfs_item), 1);

	/* copy the original item's payload into the new slot */
	eb = path->nodes[0];
	memcpy_extent_buffer(eb,
			     btrfs_item_ptr_offset(eb, path->slots[0]),
			     btrfs_item_ptr_offset(eb, path->slots[0] - 1),
			     size);
	return 0;
}

C
Chris Mason 已提交
4230 4231 4232 4233 4234 4235
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	/* already the requested size, nothing to do */
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		/* data packs from the leaf end, so shrinking raises offsets */
		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff + size_diff, &token);
	}

	/* shift the data */
	if (from_end) {
		/* chop from the end: slide the data below us up by size_diff */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			/*
			 * inline extents keep their header fields (everything
			 * up to disk_bytenr) at the item start, so move the
			 * header to where the item will land before the shift
			 */
			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
						 disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		/* chopping from the front moves the item's key offset up */
		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}

C
Chris Mason 已提交
4335 4336 4337
/*
 * make the item pointed to by the path bigger, data_size is the new size.
 */
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	/* the caller must have made room already */
	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "slot %d too large, nritems %d\n",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		/* data packs from the leaf end, so growing lowers offsets */
		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff - data_size, &token);
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}

4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 * Returns the number of keys that were inserted, or a negative errno.
 */
int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int ret = 0;
	int slot;
	int i;
	u32 nritems;
	u32 total_data = 0;
	u32 total_size = 0;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct btrfs_key found_key;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	/*
	 * only attempt as many items as fit in one empty leaf.  The old
	 * code had 'nr = i' *after* the break, so the clamp was dead code
	 * and nr was never reduced.
	 */
	for (i = 0; i < nr; i++) {
		if (total_size + data_size[i] + sizeof(struct btrfs_item) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			nr = i;
			break;
		}
		total_data += data_size[i];
		total_size += data_size[i] + sizeof(struct btrfs_item);
	}
	BUG_ON(nr == 0);

	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		/*
		 * drop items from the tail until the rest fits.  Start at
		 * nr - 1: data_size[nr] was never accumulated above, so the
		 * old 'i = nr' start read past the array and subtracted a
		 * size that was never added.
		 */
		for (i = nr - 1; i >= 0; i--) {
			total_data -= data_size[i];
			total_size -= data_size[i] + sizeof(struct btrfs_item);
			if (total_size < btrfs_leaf_free_space(root, leaf))
				break;
		}
		nr = i;
	}

	slot = path->slots[0];
	BUG_ON(slot < 0);

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* figure out how many keys we can insert in here */
		total_data = data_size[0];
		for (i = 1; i < nr; i++) {
			/* stop at the first key that would sort after us */
			if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
				break;
			total_data += data_size[i];
		}
		nr = i;

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	} else {
		/*
		 * this sucks but it has to be done, if we are inserting at
		 * the end of the leaf only insert 1 of the items, since we
		 * have no way of knowing whats on the next leaf and we'd have
		 * to drop our current locks to figure it out
		 */
		nr = 1;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
	}
	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (slot == 0) {
		/* inserting at slot 0 changes the leaf's lowest key */
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
out:
	if (!ret)
		ret = nr;
	return ret;
}

C
Chris Mason 已提交
4547
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 *
 * cpu_key/data_size are arrays of 'nr' keys and data sizes;
 * total_data is the sum of data_size and total_size is total_data plus
 * one struct btrfs_item header per new item (see btrfs_insert_empty_items)
 */
void setup_items_for_insert(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	/* the caller reserved the space via btrfs_search_slot */
	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "not enough freespace need %u have %d\n",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	/* inserting in the middle: make a gap in both headers and data */
	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);

	if (slot == 0) {
		/* inserting at slot 0 changes the leaf's lowest key */
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(trans, root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}

/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    int nr)
{
	u32 data_bytes = 0;
	u32 needed;
	int i;
	int err;

	/* total payload plus one struct btrfs_item header per new item */
	for (i = 0; i < nr; i++)
		data_bytes += data_size[i];
	needed = data_bytes + (nr * sizeof(struct btrfs_item));

	err = btrfs_search_slot(trans, root, cpu_key, path, needed, 1);
	if (err == 0)
		return -EEXIST;
	if (err < 0)
		return err;

	BUG_ON(path->slots[0] < 0);

	setup_items_for_insert(trans, root, path, cpu_key, data_size,
			       data_bytes, needed, nr);
	return 0;
}
4672 4673 4674 4675 4676 4677
}

/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
4678 4679 4680
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
4681 4682
{
	int ret = 0;
C
Chris Mason 已提交
4683
	struct btrfs_path *path;
4684 4685
	struct extent_buffer *leaf;
	unsigned long ptr;
4686

C
Chris Mason 已提交
4687
	path = btrfs_alloc_path();
T
Tsutomu Itoh 已提交
4688 4689
	if (!path)
		return -ENOMEM;
C
Chris Mason 已提交
4690
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4691
	if (!ret) {
4692 4693 4694 4695
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
4696
	}
C
Chris Mason 已提交
4697
	btrfs_free_path(path);
C
Chris Mason 已提交
4698
	return ret;
4699 4700
}

C
Chris Mason 已提交
4701
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 *
 * level/slot locate the pointer inside path->nodes[level]; when
 * tree_mod_log is non-zero the change is recorded in the tree
 * modification log for non-leaf levels
 */
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		/* close the gap by sliding the later key/ptr pairs down */
		if (tree_mod_log && level)
			tree_mod_log_eb_move(root->fs_info, parent, slot,
					     slot + 1, nritems - slot - 1);
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (tree_mod_log && level) {
		/* deleting the last slot moves nothing, but still log it */
		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
					      MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		/* the node's first key changed; fix it up in the ancestors */
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(trans, root, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}

4746 4747
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	/* a leaf being deleted should have been COWed in this transaction */
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(trans, root, path, 1, path->slots[1], 1);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	/* hold an extra ref so 'leaf' stays usable across the free */
	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
C
Chris Mason 已提交
4776 4777 4778 4779
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
4780 4781
/*
 * delete 'nr' items starting at 'slot' from the leaf at path->nodes[0].
 *
 * The remaining item data and item headers are compacted in place.  If
 * the leaf becomes empty it is removed from the tree; if it becomes
 * mostly empty we try to merge it with its neighbors.
 *
 * Returns 0 on success, < 0 on error.
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	/* start offset of the last item being deleted (items grow downward) */
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	/* total bytes of item data being removed */
	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		/* slide the item data that sits below the deleted range up */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		/* adjust the data offsets of all items after the hole */
		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		/* close the gap in the item header array */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			/* first key changed, propagate it up the tree */
			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(trans, root, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}

4891
/*
4892
 * search the tree again to find a leaf with lesser keys
4893 4894
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
C
Chris Mason 已提交
4895 4896 4897
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
4898 4899 4900
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
4901 4902 4903
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;
4904

4905
	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4906

4907 4908 4909 4910 4911 4912 4913 4914
	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;
4915

4916
	btrfs_release_path(path);
4917 4918 4919 4920 4921 4922 4923 4924
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
4925 4926
}

4927 4928 4929
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
C
Chris Mason 已提交
4930
 * transaction id.  This is used by the btree defrag code, and tree logging
4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
C
Chris Mason 已提交
4942 4943 4944 4945
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
4946 4947 4948 4949
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path, int cache_only,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;	/* default: nothing matched */

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	/* whole tree is older than min_trans, nothing to find */
	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the cache_only and
		 * min_trans parameters.  If it isn't in cache or is too
		 * old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;
			struct extent_buffer *tmp;
			struct btrfs_disk_key disk_key;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			if (!cache_only)
				break;

			/* stop early once we pass max_key */
			if (max_key) {
				btrfs_node_key(cur, &disk_key, slot);
				if (comp_keys(&disk_key, max_key) >= 0) {
					ret = 1;
					goto out;
				}
			}

			tmp = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));

			if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				free_extent_buffer(tmp);
				break;
			}
			if (tmp)
				free_extent_buffer(tmp);
			slot++;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						  cache_only, min_trans);
			if (sret == 0) {
				/* min_key was advanced; restart from the root */
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1, 0, NULL);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	/* stuff the winning key back into min_key for the caller */
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}

5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 
5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496
/* step one level down the tree at the current slot of *level */
static void tree_move_down(struct btrfs_root *root,
			   struct btrfs_path *path,
			   int *level, int root_level)
{
	struct extent_buffer *parent = path->nodes[*level];
	int parent_slot = path->slots[*level];

	path->nodes[*level - 1] = read_node_slot(root, parent, parent_slot);
	path->slots[*level - 1] = 0;
	(*level)--;
}

/*
 * Advance to the next slot at *level, climbing up the path as long as the
 * current node is exhausted.  Returns 1 if we had to climb, 0 if we only
 * stepped sideways, and -1 when the root level itself is exhausted.
 */
static int tree_move_next_or_upnext(struct btrfs_root *root,
				    struct btrfs_path *path,
				    int *level, int root_level)
{
	int went_up = 0;
	int item_count = btrfs_header_nritems(path->nodes[*level]);

	path->slots[*level]++;

	for (;;) {
		if (path->slots[*level] != item_count)
			break;
		if (*level == root_level)
			return -1;

		/* this node is used up: drop it and advance the parent */
		path->slots[*level] = 0;
		free_extent_buffer(path->nodes[*level]);
		path->nodes[*level] = NULL;
		(*level)++;
		path->slots[*level]++;

		item_count = btrfs_header_nritems(path->nodes[*level]);
		went_up = 1;
	}
	return went_up;
}

/*
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down.
 */
static int tree_advance(struct btrfs_root *root,
			struct btrfs_path *path,
			int *level, int root_level,
			int allow_down,
			struct btrfs_key *key)
{
	int ret = 0;

	/* descend when allowed and not yet on a leaf; otherwise go sideways/up */
	if (allow_down && *level != 0)
		tree_move_down(root, path, level, root_level);
	else
		ret = tree_move_next_or_upnext(root, path, level, root_level);

	/* refresh *key with the key now under the cursor (unless exhausted) */
	if (ret >= 0) {
		struct extent_buffer *eb = path->nodes[*level];
		int slot = path->slots[*level];

		if (*level == 0)
			btrfs_item_key_to_cpu(eb, key, slot);
		else
			btrfs_node_key_to_cpu(eb, key, slot);
	}
	return ret;
}

/*
 * Compare the items under the two paths byte for byte.
 * Returns 0 when identical, 1 when they differ (size or content).
 * tmp_buf must be large enough to hold one full item.
 */
static int tree_compare_item(struct btrfs_root *left_root,
			     struct btrfs_path *left_path,
			     struct btrfs_path *right_path,
			     char *tmp_buf)
{
	struct extent_buffer *l_eb = left_path->nodes[0];
	struct extent_buffer *r_eb = right_path->nodes[0];
	int l_slot = left_path->slots[0];
	int r_slot = right_path->slots[0];
	unsigned long l_off, r_off;
	int l_len, r_len;

	l_len = btrfs_item_size_nr(l_eb, l_slot);
	r_len = btrfs_item_size_nr(r_eb, r_slot);
	if (l_len != r_len)
		return 1;

	l_off = btrfs_item_ptr_offset(l_eb, l_slot);
	r_off = btrfs_item_ptr_offset(r_eb, r_slot);

	/* stage the left item in tmp_buf, then memcmp against the right */
	read_extent_buffer(l_eb, tmp_buf, l_off, l_len);
	if (memcmp_extent_buffer(r_eb, tmp_buf, r_off, l_len) != 0)
		return 1;
	return 0;
}

#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1

/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking. The commit roots are protected with transactions.
 * Transactions are ended and rejoined when a commit is tried in between.
 *
 * This function checks for modifications done to the trees while comparing.
 * If it detects a change, it aborts immediately.
 */
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t changed_cb, void *ctx)
{
	int ret;
	int cmp;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *left_path = NULL;
	struct btrfs_path *right_path = NULL;
	struct btrfs_key left_key;
	struct btrfs_key right_key;
	char *tmp_buf = NULL;	/* scratch space for tree_compare_item() */
	int left_root_level;
	int right_root_level;
	int left_level;
	int right_level;
	int left_end_reached;
	int right_end_reached;
	int advance_left;
	int advance_right;
	u64 left_blockptr;
	u64 right_blockptr;
	u64 left_start_ctransid;
	u64 right_start_ctransid;
	u64 ctransid;

	left_path = btrfs_alloc_path();
	if (!left_path) {
		ret = -ENOMEM;
		goto out;
	}
	right_path = btrfs_alloc_path();
	if (!right_path) {
		ret = -ENOMEM;
		goto out;
	}

	tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto out;
	}

	/* commit roots are read only, so we walk them without locking */
	left_path->search_commit_root = 1;
	left_path->skip_locking = 1;
	right_path->search_commit_root = 1;
	right_path->skip_locking = 1;

	/*
	 * Record both roots' ctransid now; they are re-checked on every
	 * transaction rejoin below to detect concurrent modification.
	 */
	spin_lock(&left_root->root_times_lock);
	left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
	spin_unlock(&left_root->root_times_lock);

	spin_lock(&right_root->root_times_lock);
	right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
	spin_unlock(&right_root->root_times_lock);

	trans = btrfs_join_transaction(left_root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	/*
	 * Strategy: Go to the first items of both trees. Then do
	 *
	 * If both trees are at level 0
	 *   Compare keys of current items
	 *     If left < right treat left item as new, advance left tree
	 *       and repeat
	 *     If left > right treat right item as deleted, advance right tree
	 *       and repeat
	 *     If left == right do deep compare of items, treat as changed if
	 *       needed, advance both trees and repeat
	 * If both trees are at the same level but not at level 0
	 *   Compare keys of current nodes/leafs
	 *     If left < right advance left tree and repeat
	 *     If left > right advance right tree and repeat
	 *     If left == right compare blockptrs of the next nodes/leafs
	 *       If they match advance both trees but stay at the same level
	 *         and repeat
	 *       If they don't match advance both trees while allowing to go
	 *         deeper and repeat
	 * If tree levels are different
	 *   Advance the tree that needs it and repeat
	 *
	 * Advancing a tree means:
	 *   If we are at level 0, try to go to the next slot. If that's not
	 *   possible, go one level up and repeat. Stop when we found a level
	 *   where we could go to the next slot. We may at this point be on a
	 *   node or a leaf.
	 *
	 *   If we are not at level 0 and not on shared tree blocks, go one
	 *   level deeper.
	 *
	 *   If we are not at level 0 and on shared tree blocks, go one slot to
	 *   the right if possible or go up and right.
	 */

	left_level = btrfs_header_level(left_root->commit_root);
	left_root_level = left_level;
	left_path->nodes[left_level] = left_root->commit_root;
	extent_buffer_get(left_path->nodes[left_level]);

	right_level = btrfs_header_level(right_root->commit_root);
	right_root_level = right_level;
	right_path->nodes[right_level] = right_root->commit_root;
	extent_buffer_get(right_path->nodes[right_level]);

	if (left_level == 0)
		btrfs_item_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	else
		btrfs_node_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	if (right_level == 0)
		btrfs_item_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);
	else
		btrfs_node_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);

	left_end_reached = right_end_reached = 0;
	advance_left = advance_right = 0;

	while (1) {
		/*
		 * We need to make sure the transaction does not get committed
		 * while we do anything on commit roots. This means, we need to
		 * join and leave transactions for every item that we process.
		 */
		if (trans && btrfs_should_end_transaction(trans, left_root)) {
			btrfs_release_path(left_path);
			btrfs_release_path(right_path);

			ret = btrfs_end_transaction(trans, left_root);
			trans = NULL;
			if (ret < 0)
				goto out;
		}
		/* now rejoin the transaction */
		if (!trans) {
			trans = btrfs_join_transaction(left_root);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				trans = NULL;
				goto out;
			}

			spin_lock(&left_root->root_times_lock);
			ctransid = btrfs_root_ctransid(&left_root->root_item);
			spin_unlock(&left_root->root_times_lock);
			if (ctransid != left_start_ctransid)
				left_start_ctransid = 0;

			spin_lock(&right_root->root_times_lock);
			ctransid = btrfs_root_ctransid(&right_root->root_item);
			spin_unlock(&right_root->root_times_lock);
			if (ctransid != right_start_ctransid)
				right_start_ctransid = 0;

			if (!left_start_ctransid || !right_start_ctransid) {
				WARN(1, KERN_WARNING
					"btrfs: btrfs_compare_tree detected "
					"a change in one of the trees while "
					"iterating. This is probably a "
					"bug.\n");
				ret = -EIO;
				goto out;
			}

			/*
			 * the commit root may have changed, so start again
			 * where we stopped
			 */
			left_path->lowest_level = left_level;
			right_path->lowest_level = right_level;
			ret = btrfs_search_slot(NULL, left_root,
					&left_key, left_path, 0, 0);
			if (ret < 0)
				goto out;
			ret = btrfs_search_slot(NULL, right_root,
					&right_key, right_path, 0, 0);
			if (ret < 0)
				goto out;
		}

		if (advance_left && !left_end_reached) {
			ret = tree_advance(left_root, left_path, &left_level,
					left_root_level,
					advance_left != ADVANCE_ONLY_NEXT,
					&left_key);
			if (ret < 0)
				left_end_reached = ADVANCE;
			advance_left = 0;
		}
		if (advance_right && !right_end_reached) {
			ret = tree_advance(right_root, right_path, &right_level,
					right_root_level,
					advance_right != ADVANCE_ONLY_NEXT,
					&right_key);
			if (ret < 0)
				right_end_reached = ADVANCE;
			advance_right = 0;
		}

		if (left_end_reached && right_end_reached) {
			ret = 0;
			goto out;
		} else if (left_end_reached) {
			/* everything left in the right tree was deleted */
			if (right_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_right = ADVANCE;
			continue;
		} else if (right_end_reached) {
			/* everything left in the left tree is new */
			if (left_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_left = ADVANCE;
			continue;
		}

		if (left_level == 0 && right_level == 0) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
				advance_right = ADVANCE;
			} else {
				/* same key on both sides: deep-compare contents */
				ret = tree_compare_item(left_root, left_path,
						right_path, tmp_buf);
				if (ret) {
					ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_CHANGED,
						ctx);
					if (ret < 0)
						goto out;
				}
				advance_left = ADVANCE;
				advance_right = ADVANCE;
			}
		} else if (left_level == right_level) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				advance_right = ADVANCE;
			} else {
				left_blockptr = btrfs_node_blockptr(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_blockptr = btrfs_node_blockptr(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				if (left_blockptr == right_blockptr) {
					/*
					 * As we're on a shared block, don't
					 * allow to go deeper.
					 */
					advance_left = ADVANCE_ONLY_NEXT;
					advance_right = ADVANCE_ONLY_NEXT;
				} else {
					advance_left = ADVANCE;
					advance_right = ADVANCE;
				}
			}
		} else if (left_level < right_level) {
			advance_right = ADVANCE;
		} else {
			advance_left = ADVANCE;
		}
	}

out:
	btrfs_free_path(left_path);
	btrfs_free_path(right_path);
	kfree(tmp_buf);

	if (trans) {
		if (!ret)
			ret = btrfs_end_transaction(trans, left_root);
		else
			btrfs_end_transaction(trans, left_root);
	}

	return ret;
}

5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
5509
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level,
			int cache_only, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			/* parent is still locked, just walk up to it */
			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			/*
			 * parent is unlocked; re-search from the last key of
			 * this node so the path is repopulated safely
			 */
			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (cache_only) {
				/* skip children that aren't cached/uptodate */
				struct extent_buffer *cur;
				cur = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));
				if (!cur ||
				    btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
					slot++;
					if (cur)
						free_extent_buffer(cur);
					goto next;
				}
				free_extent_buffer(cur);
			}
			/* skip children older than min_trans */
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}

C
Chris Mason 已提交
5589
/*
5590
 * search the tree again to find a leaf with greater keys
C
Chris Mason 已提交
5591 5592
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
C
Chris Mason 已提交
5593
 */
C
Chris Mason 已提交
5594
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
J
Jan Schmidt 已提交
5595 5596 5597 5598 5599 5600
{
	return btrfs_next_old_leaf(root, path, 0);
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	/* remember the last key of the current leaf before dropping locks */
	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	/* non-zero time_seq means replay through the tree mod log */
	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}

	/* climb until we find a node with a slot to the right of us */
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	/* walk back down the left-most edge of the subtree we stepped into */
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
5753

5754 5755 5756 5757 5758 5759
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
5760 5761 5762 5763 5764 5765
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			/* at the first slot, step back to the previous leaf */
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		/* prev_leaf may leave the slot one past the last item */
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		/* same objectid but already past 'type': no match possible */
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}