/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"


/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
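
/*
 * Worked example (illustrative values, not from the original source):
 * for the 48-bit physical block number pb = 0x0123456789ab,
 *	pb & 0xffffffff	= 0x456789ab  (stored in ee_start_lo)
 *	(pb >> 31) >> 1	= 0x0123      (stored in ee_start_hi)
 * and ext_pblock() reassembles the same value:
 *	(0x0123ULL << 31) << 1 | 0x456789ab = 0x0123456789ab
 * The split shift, rather than a single >> 32, presumably keeps the
 * expression well-defined even if ext4_fsblk_t is only 32 bits wide,
 * where shifting by the full type width would be undefined behaviour.
 */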

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}
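
/*
 * Illustrative numbers (assumed, not from the original source): with
 * EXT4_BLOCKS_PER_GROUP == 32768, each of the 16 PID buckets above
 * covers 32768 / 16 = 2048 blocks, so a task with
 * current->pid % 16 == 5 gets the goal bg_start + 5 * 2048 + block.
 * The colouring spreads concurrent writers across the block group.
 */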

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}
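
/*
 * Rough capacities implied by the four helpers above (assuming a 4KB
 * block and 12-byte header/extent/index structures): an index or leaf
 * block holds (4096 - 12) / 12 = 340 entries, while the 60-byte
 * i_data root holds (60 - 12) / 12 = 4 entries.  Under AGGRESSIVE_TEST
 * the limits shrink to a few entries so tree growth is exercised early.
 */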

/*
 * Calculate the number of metadata blocks needed
 * to allocate a new block at @lblock
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
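
/*
 * Worked example (assuming a 4KB block size, i.e. idxs = 340 as
 * computed above): while delayed-allocation blocks stay contiguous,
 * the function returns one extra metadata block roughly every 340
 * blocks (a new leaf), another every 340^2 blocks (a new level-1
 * index), and so on; the first block of a discontiguous run is
 * charged the full worst case of ext_depth(inode) + 1 blocks.
 */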

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, struct inode *inode,
					struct ext4_extent_header *eh,
					int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(function, inode,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
			    idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
		  if (k != 0 &&
		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
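
/*
 * Note on the returned path (added for clarity): on success the array
 * holds one ext4_ext_path element per tree level; path[0].p_hdr is
 * the root header inside the inode body and path[depth] describes the
 * leaf, with p_ext pointing at the extent chosen by
 * ext4_ext_binsearch(), or NULL when the leaf is empty.
 */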

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}
	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
807 808 809 810 811 812 813 814
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, 0, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate e_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? ix->ei_block : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
				    EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext_pblock(ex);
	put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
		return 1;
	return 0;
}
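
/*
 * Example (lengths assume the EXT_INIT_MAX_LEN/EXT_UNINIT_MAX_LEN
 * limits of 32768/32767 from ext4_extents.h): two initialized extents
 * of 20000 and 15000 blocks may be logically and physically contiguous
 * and still not merge, because 35000 > 32768 would overflow into the
 * uninitialized bit of ee_len.
 */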

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
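
/*
 * Worked example (values invented for illustration): if newext covers
 * logical blocks 100..149 (b1 = 100, len1 = 50) and the next allocated
 * extent starts at b2 = 120, then b1 + len1 > b2, so newext->ee_len is
 * trimmed to b2 - b1 = 20 blocks and 1 is returned.
 */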

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				le32_to_cpu(ex->ee_block),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isnt full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
A
Alex Tomas 已提交
1787 1788
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
D
Dave Kleikamp 已提交
1789
			   > le32_to_cpu(nearex->ee_block)) {
A
Alex Tomas 已提交
1790 1791 1792 1793 1794
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
1795
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
A
Alex Tomas 已提交
1796
					"move %d from 0x%p to 0x%p\n",
D
Dave Kleikamp 已提交
1797 1798
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
1799
					ext4_ext_is_uninitialized(newext),
A
Amit Arora 已提交
1800
					ext4_ext_get_actual_len(newext),
A
Alex Tomas 已提交
1801 1802 1803 1804 1805 1806 1807 1808
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
1809
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
A
Alex Tomas 已提交
1810 1811
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
1812
				ext_pblock(newext),
1813
				ext4_ext_is_uninitialized(newext),
A
Amit Arora 已提交
1814
				ext4_ext_get_actual_len(newext),
A
Alex Tomas 已提交
1815 1816 1817 1818 1819
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

M
Marcin Slusarz 已提交
1820
	le16_add_cpu(&eh->eh_entries, 1);
A
Alex Tomas 已提交
1821 1822
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
1823
	ext4_ext_store_pblock(nearex, ext_pblock(newext));
A
Alex Tomas 已提交
1824 1825 1826 1827
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
1828
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1829
		ext4_ext_try_to_merge(inode, path, nearex);
A
Alex Tomas 已提交
1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}

1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865
int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			ext4_lblk_t num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
1866
		down_read(&EXT4_I(inode)->i_data_sem);
1867
		path = ext4_ext_find_extent(inode, block, path);
1868
		up_read(&EXT4_I(inode)->i_data_sem);
1869 1870 1871 1872 1873 1874 1875
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
1876 1877 1878 1879 1880
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EIO;
			break;
		}
1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

1931 1932 1933 1934 1935
		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
			err = -EIO;
			break;
		}
1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}

1966
static void
A
Aneesh Kumar K.V 已提交
1967
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
1968
			__u32 len, ext4_fsblk_t start, int type)
A
Alex Tomas 已提交
1969 1970 1971
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
1972
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
A
Alex Tomas 已提交
1973 1974 1975 1976 1977
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
1978
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
A
Alex Tomas 已提交
1979 1980 1981
}

/*
1982 1983
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
A
Alex Tomas 已提交
1984 1985
 * and cache this gap
 */
1986
static void
A
Alex Tomas 已提交
1987
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
A
Aneesh Kumar K.V 已提交
1988
				ext4_lblk_t block)
A
Alex Tomas 已提交
1989 1990
{
	int depth = ext_depth(inode);
A
Aneesh Kumar K.V 已提交
1991 1992
	unsigned long len;
	ext4_lblk_t lblock;
A
Alex Tomas 已提交
1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
2004 2005 2006 2007
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				 ext4_ext_get_actual_len(ex));
A
Alex Tomas 已提交
2008
	} else if (block >= le32_to_cpu(ex->ee_block)
A
Amit Arora 已提交
2009
			+ ext4_ext_get_actual_len(ex)) {
A
Aneesh Kumar K.V 已提交
2010
		ext4_lblk_t next;
D
Dave Kleikamp 已提交
2011
		lblock = le32_to_cpu(ex->ee_block)
A
Amit Arora 已提交
2012
			+ ext4_ext_get_actual_len(ex);
A
Aneesh Kumar K.V 已提交
2013 2014

		next = ext4_ext_next_allocated_block(path);
2015 2016 2017 2018
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
A
Aneesh Kumar K.V 已提交
2019 2020
		BUG_ON(next == lblock);
		len = next - lblock;
A
Alex Tomas 已提交
2021 2022 2023 2024 2025
	} else {
		lblock = len = 0;
		BUG();
	}

2026
	ext_debug(" -> %u:%lu\n", lblock, len);
A
Alex Tomas 已提交
2027 2028 2029
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}

2030
static int
A
Aneesh Kumar K.V 已提交
2031
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
A
Alex Tomas 已提交
2032 2033 2034
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;
2035
	int ret = EXT4_EXT_CACHE_NO;
A
Alex Tomas 已提交
2036

2037
	/*
2038 2039 2040
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
A
Alex Tomas 已提交
2041 2042 2043 2044
	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
2045
		goto errout;
A
Alex Tomas 已提交
2046 2047 2048

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
2049
	if (in_range(block, cex->ec_block, cex->ec_len)) {
D
Dave Kleikamp 已提交
2050
		ex->ee_block = cpu_to_le32(cex->ec_block);
2051
		ext4_ext_store_pblock(ex, cex->ec_start);
D
Dave Kleikamp 已提交
2052
		ex->ee_len = cpu_to_le16(cex->ec_len);
2053 2054 2055
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
2056
		ret = cex->ec_type;
A
Alex Tomas 已提交
2057
	}
2058 2059 2060
errout:
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
A
Alex Tomas 已提交
2061 2062 2063
}

/*
2064 2065 2066 2067
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
A
Alex Tomas 已提交
2068
 */
A
Aneesh Kumar K.V 已提交
2069
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
A
Alex Tomas 已提交
2070 2071 2072
			struct ext4_ext_path *path)
{
	int err;
2073
	ext4_fsblk_t leaf;
A
Alex Tomas 已提交
2074 2075 2076

	/* free index block */
	path--;
2077
	leaf = idx_pblock(path->p_idx);
2078 2079 2080 2081
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EIO;
	}
2082 2083
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
A
Alex Tomas 已提交
2084
		return err;
M
Marcin Slusarz 已提交
2085
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
2086 2087
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
A
Alex Tomas 已提交
2088
		return err;
2089
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
2090 2091
	ext4_free_blocks(handle, inode, 0, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
A
Alex Tomas 已提交
2092 2093 2094 2095
	return err;
}

/*
2096 2097 2098 2099 2100
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns max. credits that needed to insert an extent
 * to the extent tree.
 * When pass the actual path, the caller should calculate credits
 * under i_data_sem.
A
Alex Tomas 已提交
2101
 */
2102
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
A
Alex Tomas 已提交
2103 2104 2105
						struct ext4_ext_path *path)
{
	if (path) {
2106
		int depth = ext_depth(inode);
2107
		int ret = 0;
2108

A
Alex Tomas 已提交
2109 2110
		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2111
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
A
Alex Tomas 已提交
2112

2113 2114 2115 2116 2117 2118 2119 2120
			/*
			 *  There are some space in the leaf tree, no
			 *  need to account for leaf block credit
			 *
			 *  bitmaps and block group descriptor blocks
			 *  and other metadat blocks still need to be
			 *  accounted.
			 */
2121
			/* 1 bitmap, 1 block group descriptor */
2122
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2123
			return ret;
2124 2125
		}
	}
A
Alex Tomas 已提交
2126

2127
	return ext4_chunk_trans_blocks(inode, nrblocks);
2128
}
A
Alex Tomas 已提交
2129

2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140
/*
 * How many index/leaf blocks need to change/allocate to modify nrblocks?
 *
 * if nrblocks are fit in a single extent (chunk flag is 1), then
 * in the worse case, each tree level index/leaf need to be changed
 * if the tree split due to insert a new extent, then the old tree
 * index/leaf need to be updated too
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree split more than once, but this is really rare.
 */
2141
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2142 2143 2144
{
	int index;
	int depth = ext_depth(inode);
A
Alex Tomas 已提交
2145

2146 2147 2148 2149
	if (chunk)
		index = depth * 2;
	else
		index = depth * 3;
A
Alex Tomas 已提交
2150

2151
	return index;
A
Alex Tomas 已提交
2152 2153 2154 2155
}

static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
A
Aneesh Kumar K.V 已提交
2156
				ext4_lblk_t from, ext4_lblk_t to)
A
Alex Tomas 已提交
2157
{
A
Amit Arora 已提交
2158
	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2159
	int flags = EXT4_FREE_BLOCKS_FORGET;
A
Alex Tomas 已提交
2160

2161
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2162
		flags |= EXT4_FREE_BLOCKS_METADATA;
A
Alex Tomas 已提交
2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178
#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
A
Amit Arora 已提交
2179
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
A
Alex Tomas 已提交
2180
		/* tail removal */
A
Aneesh Kumar K.V 已提交
2181
		ext4_lblk_t num;
2182
		ext4_fsblk_t start;
A
Aneesh Kumar K.V 已提交
2183

A
Amit Arora 已提交
2184 2185
		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext_pblock(ex) + ee_len - num;
A
Aneesh Kumar K.V 已提交
2186
		ext_debug("free last %u blocks starting %llu\n", num, start);
2187
		ext4_free_blocks(handle, inode, 0, start, num, flags);
A
Alex Tomas 已提交
2188
	} else if (from == le32_to_cpu(ex->ee_block)
A
Amit Arora 已提交
2189
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
A
Aneesh Kumar K.V 已提交
2190
		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
A
Amit Arora 已提交
2191
			from, to, le32_to_cpu(ex->ee_block), ee_len);
A
Alex Tomas 已提交
2192
	} else {
A
Aneesh Kumar K.V 已提交
2193 2194 2195
		printk(KERN_INFO "strange request: removal(2) "
				"%u-%u from %u:%u\n",
				from, to, le32_to_cpu(ex->ee_block), ee_len);
A
Alex Tomas 已提交
2196 2197 2198 2199 2200 2201
	}
	return 0;
}

static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
A
Aneesh Kumar K.V 已提交
2202
		struct ext4_ext_path *path, ext4_lblk_t start)
A
Alex Tomas 已提交
2203 2204 2205 2206
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
A
Aneesh Kumar K.V 已提交
2207 2208 2209
	ext4_lblk_t a, b, block;
	unsigned num;
	ext4_lblk_t ex_ee_block;
A
Alex Tomas 已提交
2210
	unsigned short ex_ee_len;
A
Amit Arora 已提交
2211
	unsigned uninitialized = 0;
A
Alex Tomas 已提交
2212 2213
	struct ext4_extent *ex;

2214
	/* the header must be checked already in ext4_ext_remove_space() */
A
Aneesh Kumar K.V 已提交
2215
	ext_debug("truncate since %u in leaf\n", start);
A
Alex Tomas 已提交
2216 2217 2218
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
2219 2220 2221 2222
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}
A
Alex Tomas 已提交
2223 2224 2225 2226
	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
A
Amit Arora 已提交
2227
	ex_ee_len = ext4_ext_get_actual_len(ex);
A
Alex Tomas 已提交
2228 2229 2230

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {
2231 2232 2233 2234 2235 2236

		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		else
			uninitialized = 0;

2237 2238
		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			 uninitialized, ex_ee_len);
A
Alex Tomas 已提交
2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

2269 2270 2271 2272 2273 2274 2275
		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
A
Alex Tomas 已提交
2276 2277 2278 2279
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
D
Dmitry Monakhov 已提交
2280
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
A
Alex Tomas 已提交
2281

2282
		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2283
		if (err)
A
Alex Tomas 已提交
2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
2295
			/* this extent is removed; mark slot entirely unused */
2296
			ext4_ext_store_pblock(ex, 0);
M
Marcin Slusarz 已提交
2297
			le16_add_cpu(&eh->eh_entries, -1);
A
Alex Tomas 已提交
2298 2299 2300 2301
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
2302 2303 2304 2305 2306
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
A
Amit Arora 已提交
2307
			ext4_ext_mark_uninitialized(ex);
A
Alex Tomas 已提交
2308 2309 2310 2311 2312

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

2313
		ext_debug("new extent: %u:%u:%llu\n", block, num,
2314
				ext_pblock(ex));
A
Alex Tomas 已提交
2315 2316
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
A
Amit Arora 已提交
2317
		ex_ee_len = ext4_ext_get_actual_len(ex);
A
Alex Tomas 已提交
2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}

/*
2333 2334
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
A
Alex Tomas 已提交
2335
 */
2336
static int
A
Alex Tomas 已提交
2337 2338 2339 2340 2341 2342 2343 2344
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
2345
	 * if truncate on deeper level happened, it wasn't partial,
A
Alex Tomas 已提交
2346 2347 2348 2349 2350 2351 2352
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}

A
Aneesh Kumar K.V 已提交
2353
static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
A
Alex Tomas 已提交
2354 2355 2356 2357 2358
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
2359
	int i, err;
A
Alex Tomas 已提交
2360

A
Aneesh Kumar K.V 已提交
2361
	ext_debug("truncate since %u\n", start);
A
Alex Tomas 已提交
2362 2363 2364 2365 2366 2367

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

2368
again:
A
Alex Tomas 已提交
2369 2370 2371
	ext4_ext_invalidate_cache(inode);

	/*
2372 2373
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
A
Alex Tomas 已提交
2374
	 */
2375
	depth = ext_depth(inode);
2376
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
A
Alex Tomas 已提交
2377 2378 2379 2380
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
2381
	path[0].p_depth = depth;
A
Alex Tomas 已提交
2382
	path[0].p_hdr = ext_inode_hdr(inode);
2383
	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
A
Alex Tomas 已提交
2384 2385 2386
		err = -EIO;
		goto out;
	}
2387
	i = err = 0;
A
Alex Tomas 已提交
2388 2389 2390 2391 2392

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
2393
			/* root level has p_bh == NULL, brelse() eats this */
A
Alex Tomas 已提交
2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
2407
			/* this level hasn't been touched yet */
A
Alex Tomas 已提交
2408 2409 2410 2411 2412 2413
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
2414
			/* we were already here, see at next index */
A
Alex Tomas 已提交
2415 2416 2417 2418 2419 2420 2421
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
2422
			struct buffer_head *bh;
A
Alex Tomas 已提交
2423
			/* go to the next level */
2424
			ext_debug("move to level %d (block %llu)\n",
2425
				  i + 1, idx_pblock(path[i].p_idx));
A
Alex Tomas 已提交
2426
			memset(path + i + 1, 0, sizeof(*path));
2427 2428
			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
			if (!bh) {
A
Alex Tomas 已提交
2429 2430 2431 2432
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
2433 2434 2435 2436
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
2437
			if (ext4_ext_check(inode, ext_block_hdr(bh),
2438 2439 2440 2441 2442
							depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;
A
Alex Tomas 已提交
2443

2444 2445
			/* save actual number of indexes since this
			 * number is changed at the next iteration */
A
Alex Tomas 已提交
2446 2447 2448
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
2449
			/* we finished processing this index, go up */
A
Alex Tomas 已提交
2450
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2451
				/* index is empty, remove it;
A
Alex Tomas 已提交
2452 2453 2454 2455
				 * handle must be already prepared by the
				 * truncatei_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
2456
			/* root level has p_bh == NULL, brelse() eats this */
A
Alex Tomas 已提交
2457 2458 2459 2460 2461 2462 2463 2464 2465 2466
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
2467 2468
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
A
Alex Tomas 已提交
2469 2470 2471 2472 2473
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
2474
				cpu_to_le16(ext4_ext_space_root(inode, 0));
A
Alex Tomas 已提交
2475 2476 2477 2478 2479 2480
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
2481 2482
	if (err == -EAGAIN)
		goto again;
A
Alex Tomas 已提交
2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496
	ext4_journal_stop(handle);

	return err;
}

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

2497
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2498
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2499
		printk(KERN_INFO "EXT4-fs: file extents enabled");
2500 2501
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
A
Alex Tomas 已提交
2502 2503 2504 2505 2506 2507 2508 2509
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
2510
#endif
A
Alex Tomas 已提交
2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
2524
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
A
Alex Tomas 已提交
2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

2539 2540 2541 2542 2543 2544 2545 2546
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

/* FIXME!! we need to try to merge to left or right after zero-out  */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
2547
	int ret;
2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570
	struct bio *bio;
	int blkbits, blocksize;
	sector_t ee_pblock;
	struct completion event;
	unsigned int ee_len, len, done, offset;


	blkbits   = inode->i_blkbits;
	blocksize = inode->i_sb->s_blocksize;
	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext_pblock(ex);

	/* convert ee_pblock to 512 byte sectors */
	ee_pblock = ee_pblock << (blkbits - 9);

	while (ee_len > 0) {

		if (ee_len > BIO_MAX_PAGES)
			len = BIO_MAX_PAGES;
		else
			len = ee_len;

		bio = bio_alloc(GFP_NOIO, len);
2571 2572 2573
		if (!bio)
			return -ENOMEM;

2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600
		bio->bi_sector = ee_pblock;
		bio->bi_bdev   = inode->i_sb->s_bdev;

		done = 0;
		offset = 0;
		while (done < len) {
			ret = bio_add_page(bio, ZERO_PAGE(0),
							blocksize, offset);
			if (ret != blocksize) {
				/*
				 * We can't add any more pages because of
				 * hardware limitations.  Start a new bio.
				 */
				break;
			}
			done++;
			offset += blocksize;
			if (offset >= PAGE_CACHE_SIZE)
				offset = 0;
		}

		init_completion(&event);
		bio->bi_private = &event;
		bio->bi_end_io = bi_complete;
		submit_bio(WRITE, bio);
		wait_for_completion(&event);

2601 2602 2603
		if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
			bio_put(bio);
			return -EIO;
2604 2605 2606 2607 2608
		}
		bio_put(bio);
		ee_len    -= done;
		ee_pblock += done  << (blkbits - 9);
	}
2609
	return 0;
2610 2611
}

2612
#define EXT4_EXT_ZERO_LEN 7
2613
/*
2614
 * This function is called by ext4_ext_map_blocks() if someone tries to write
2615 2616 2617 2618 2619 2620 2621 2622
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (upto three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Somone is writing in middle of the extent
 */
A
Aneesh Kumar K.V 已提交
2623
static int ext4_ext_convert_to_initialized(handle_t *handle,
2624 2625 2626
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path *path)
2627
{
2628
	struct ext4_extent *ex, newex, orig_ex;
2629 2630 2631 2632
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
2633
	ext4_lblk_t ee_block, eof_block;
A
Aneesh Kumar K.V 已提交
2634
	unsigned int allocated, ee_len, depth;
2635 2636 2637
	ext4_fsblk_t newblock;
	int err = 0;
	int ret = 0;
2638 2639 2640 2641
	int may_zeroout;

	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
2642
		(unsigned long long)map->m_lblk, map->m_len);
2643 2644 2645

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
2646 2647
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
2648 2649 2650 2651 2652 2653

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
2654 2655
	allocated = ee_len - (map->m_lblk - ee_block);
	newblock = map->m_lblk - ee_block + ext_pblock(ex);
2656

2657
	ex2 = ex;
2658 2659 2660
	orig_ex.ee_block = ex->ee_block;
	orig_ex.ee_len   = cpu_to_le16(ee_len);
	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2661

2662 2663 2664 2665 2666 2667
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully insde i_size or new_size.
	 */
	may_zeroout = ee_block + ee_len <= eof_block;

2668 2669 2670
	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
2671
	/* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */
2672
	if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) {
2673 2674 2675 2676 2677 2678 2679 2680
		err =  ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
2681 2682
		/* zeroed the full extent */
		return allocated;
2683
	}
2684

2685 2686
	/* ex1: ee_block to map->m_lblk - 1 : uninitialized */
	if (map->m_lblk > ee_block) {
2687
		ex1 = ex;
2688
		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2689 2690 2691 2692 2693 2694 2695 2696
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
2697 2698
	if (!ex1 && allocated > map->m_len)
		ex2->ee_len = cpu_to_le16(map->m_len);
2699
	/* ex3: to ee_block + ee_len : uninitialised */
2700
	if (allocated > map->m_len) {
2701
		unsigned int newdepth;
2702
		/* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
2703
		if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
2704
			/*
2705
			 * map->m_lblk == ee_block is handled by the zerouout
2706 2707
			 * at the beginning.
			 * Mark first half uninitialized.
2708 2709 2710 2711 2712 2713 2714 2715 2716 2717
			 * Mark second half initialized and zero out the
			 * initialized extent
			 */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = cpu_to_le16(ee_len - allocated);
			ext4_ext_mark_uninitialized(ex);
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);

			ex3 = &newex;
2718
			ex3->ee_block = cpu_to_le32(map->m_lblk);
2719 2720
			ext4_ext_store_pblock(ex3, newblock);
			ex3->ee_len = cpu_to_le16(allocated);
2721 2722
			err = ext4_ext_insert_extent(handle, inode, path,
							ex3, 0);
2723 2724 2725 2726 2727 2728 2729 2730
			if (err == -ENOSPC) {
				err =  ext4_ext_zeroout(inode, &orig_ex);
				if (err)
					goto fix_extent_len;
				ex->ee_block = orig_ex.ee_block;
				ex->ee_len   = orig_ex.ee_len;
				ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
				ext4_ext_dirty(handle, inode, path + depth);
2731
				/* blocks available from map->m_lblk */
2732
				return allocated;
2733 2734 2735 2736

			} else if (err)
				goto fix_extent_len;

2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752
			/*
			 * We need to zero out the second half because
			 * an fallocate request can update file size and
			 * converting the second half to initialized extent
			 * implies that we can leak some junk data to user
			 * space.
			 */
			err =  ext4_ext_zeroout(inode, ex3);
			if (err) {
				/*
				 * We should actually mark the
				 * second half as uninit and return error
				 * Insert would have changed the extent
				 */
				depth = ext_depth(inode);
				ext4_ext_drop_refs(path);
2753 2754
				path = ext4_ext_find_extent(inode, map->m_lblk,
							    path);
2755 2756 2757 2758
				if (IS_ERR(path)) {
					err = PTR_ERR(path);
					return err;
				}
2759
				/* get the second half extent details */
2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770
				ex = path[depth].p_ext;
				err = ext4_ext_get_access(handle, inode,
								path + depth);
				if (err)
					return err;
				ext4_ext_mark_uninitialized(ex);
				ext4_ext_dirty(handle, inode, path + depth);
				return err;
			}

			/* zeroed the second half */
2771 2772
			return allocated;
		}
2773
		ex3 = &newex;
2774 2775 2776
		ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
		ext4_ext_store_pblock(ex3, newblock + map->m_len);
		ex3->ee_len = cpu_to_le16(allocated - map->m_len);
2777
		ext4_ext_mark_uninitialized(ex3);
2778
		err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
2779
		if (err == -ENOSPC && may_zeroout) {
2780 2781 2782 2783
			err =  ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
2784 2785 2786 2787
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
2788
			/* zeroed the full extent */
2789
			/* blocks available from map->m_lblk */
2790
			return allocated;
2791 2792 2793

		} else if (err)
			goto fix_extent_len;
2794 2795 2796 2797 2798
		/*
		 * The depth, and hence eh & ex might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
2799
		/*
C
Coly Li 已提交
2800
		 * update the extent length after successful insert of the
2801 2802
		 * split extent
		 */
2803 2804 2805 2806
		ee_len -= ext4_ext_get_actual_len(ex3);
		orig_ex.ee_len = cpu_to_le16(ee_len);
		may_zeroout = ee_block + ee_len <= eof_block;

2807 2808
		depth = newdepth;
		ext4_ext_drop_refs(path);
2809
		path = ext4_ext_find_extent(inode, map->m_lblk, path);
2810 2811 2812
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
2813
		}
2814 2815 2816 2817 2818 2819 2820 2821 2822
		eh = path[depth].p_hdr;
		ex = path[depth].p_ext;
		if (ex2 != &newex)
			ex2 = ex;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

2823
		allocated = map->m_len;
2824 2825 2826 2827 2828 2829

		/* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
		 * to insert a extent in the middle zerout directly
		 * otherwise give the extent a chance to merge to left
		 */
		if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2830
			map->m_lblk != ee_block && may_zeroout) {
2831 2832 2833 2834 2835 2836 2837 2838
			err =  ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
2839
			/* zero out the first half */
2840
			/* blocks available from map->m_lblk */
2841
			return allocated;
2842
		}
2843 2844 2845 2846 2847 2848 2849 2850
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
2851
		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2852 2853 2854
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
2855 2856
	/* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
	ex2->ee_block = cpu_to_le32(map->m_lblk);
2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	if (ex2 != ex)
		goto insert;
	/*
	 * New (initialized) extent starts from the first block
	 * in the current extent. i.e., ex2 == ex
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex2 > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex2 - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex2--;
		}
	}
	/*
	 * Try to Merge towards right. This might be required
	 * only when the whole extent is being written to.
	 * i.e. ex2 == ex and ex3 == NULL.
	 */
	if (!ex3) {
		ret = ext4_ext_try_to_merge(inode, path, ex2);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
		}
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	goto out;
insert:
2898
	err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
2899
	if (err == -ENOSPC && may_zeroout) {
2900 2901 2902 2903
		err =  ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
2904 2905 2906 2907
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
2908 2909
		/* zero out the first half */
		return allocated;
2910 2911
	} else if (err)
		goto fix_extent_len;
2912
out:
2913
	ext4_ext_show_leaf(inode, path);
2914
	return err ? err : allocated;
2915 2916 2917 2918 2919 2920 2921 2922

fix_extent_len:
	ex->ee_block = orig_ex.ee_block;
	ex->ee_len   = orig_ex.ee_len;
	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
	ext4_ext_mark_uninitialized(ex);
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
2923 2924
}

2925
/*
2926
 * This function is called by ext4_ext_map_blocks() from
2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943
 * ext4_get_blocks_dio_write() when DIO to write
 * to an uninitialized extent.
 *
 * Writing to an uninitized extent may result in splitting the uninitialized
 * extent into multiple /intialized unintialized extents (up to three)
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be uninitialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Somone is writing in middle of the extent
 *
 * One of more index blocks maybe needed if the extent tree grow after
 * the unintialized extent split. To prevent ENOSPC occur at the IO
 * complete, we need to split the uninitialized extent before DIO submit
 * the IO. The uninitilized extent called at this time will be split
 * into three uninitialized extent(at most). After IO complete, the part
 * being filled will be convert to initialized by the end_io callback function
 * via ext4_convert_unwritten_extents().
2944 2945
 *
 * Returns the size of uninitialized extent to be written on success.
2946 2947 2948
 */
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
2949
					struct ext4_map_blocks *map,
2950 2951 2952 2953 2954 2955 2956 2957
					struct ext4_ext_path *path,
					int flags)
{
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
2958
	ext4_lblk_t ee_block, eof_block;
2959 2960 2961
	unsigned int allocated, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;
2962 2963 2964 2965
	int may_zeroout;

	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
2966
		(unsigned long long)map->m_lblk, map->m_len);
2967 2968 2969

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
2970 2971
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
2972 2973 2974 2975 2976 2977

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
2978 2979
	allocated = ee_len - (map->m_lblk - ee_block);
	newblock = map->m_lblk - ee_block + ext_pblock(ex);
2980

2981 2982 2983 2984 2985
	ex2 = ex;
	orig_ex.ee_block = ex->ee_block;
	orig_ex.ee_len   = cpu_to_le16(ee_len);
	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));

2986 2987 2988 2989 2990 2991
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully insde i_size or new_size.
	 */
	may_zeroout = ee_block + ee_len <= eof_block;

2992
	/*
2993 2994 2995
 	 * If the uninitialized extent begins at the same logical
 	 * block where the write begins, and the write completely
 	 * covers the extent, then we don't need to split it.
2996
 	 */
2997
	if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
2998
		return allocated;
2999 3000 3001 3002

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
3003 3004
	/* ex1: ee_block to map->m_lblk - 1 : uninitialized */
	if (map->m_lblk > ee_block) {
3005
		ex1 = ex;
3006
		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
3007 3008 3009 3010 3011 3012 3013 3014
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
3015 3016
	if (!ex1 && allocated > map->m_len)
		ex2->ee_len = cpu_to_le16(map->m_len);
3017
	/* ex3: to ee_block + ee_len : uninitialised */
3018
	if (allocated > map->m_len) {
3019 3020
		unsigned int newdepth;
		ex3 = &newex;
3021 3022 3023
		ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
		ext4_ext_store_pblock(ex3, newblock + map->m_len);
		ex3->ee_len = cpu_to_le16(allocated - map->m_len);
3024 3025
		ext4_ext_mark_uninitialized(ex3);
		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
3026
		if (err == -ENOSPC && may_zeroout) {
3027 3028 3029 3030 3031 3032 3033 3034 3035
			err =  ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zeroed the full extent */
3036
			/* blocks available from map->m_lblk */
3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049
			return allocated;

		} else if (err)
			goto fix_extent_len;
		/*
		 * The depth, and hence eh & ex might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
		/*
		 * update the extent length after successful insert of the
		 * split extent
		 */
3050 3051 3052 3053
		ee_len -= ext4_ext_get_actual_len(ex3);
		orig_ex.ee_len = cpu_to_le16(ee_len);
		may_zeroout = ee_block + ee_len <= eof_block;

3054 3055
		depth = newdepth;
		ext4_ext_drop_refs(path);
3056
		path = ext4_ext_find_extent(inode, map->m_lblk, path);
3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}
		eh = path[depth].p_hdr;
		ex = path[depth].p_ext;
		if (ex2 != &newex)
			ex2 = ex;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

3070
		allocated = map->m_len;
3071 3072 3073 3074 3075 3076 3077 3078
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
3079
		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
3080 3081 3082 3083
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
3084 3085
	 * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
	 * using direct I/O, uninitialised still.
3086
	 */
3087
	ex2->ee_block = cpu_to_le32(map->m_lblk);
3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	ext4_ext_mark_uninitialized(ex2);
	if (ex2 != ex)
		goto insert;
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	ext_debug("out here\n");
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3099
	if (err == -ENOSPC && may_zeroout) {
3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123
		err =  ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zero out the first half */
		return allocated;
	} else if (err)
		goto fix_extent_len;
out:
	ext4_ext_show_leaf(inode, path);
	return err ? err : allocated;

fix_extent_len:
	ex->ee_block = orig_ex.ee_block;
	ex->ee_len   = orig_ex.ee_len;
	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
	ext4_ext_mark_uninitialized(ex);
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}
3124
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178
					      struct inode *inode,
					      struct ext4_ext_path *path)
{
	struct ext4_extent *ex;
	struct ext4_extent_header *eh;
	int depth;
	int err = 0;
	int ret = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/*
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex--;
		}
	}
	/*
	 * Try to Merge towards right.
	 */
	ret = ext4_ext_try_to_merge(inode, path, ex);
	if (ret) {
		err = ext4_ext_correct_indexes(handle, inode, path);
		if (err)
			goto out;
		depth = ext_depth(inode);
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}

3179 3180 3181 3182 3183 3184 3185 3186
static void unmap_underlying_metadata_blocks(struct block_device *bdev,
			sector_t block, int count)
{
	int i;
	for (i = 0; i < count; i++)
                unmap_underlying_metadata(bdev, block + i);
}

3187 3188
static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3189
			struct ext4_map_blocks *map,
3190
			struct ext4_ext_path *path, int flags,
3191
			unsigned int allocated, ext4_fsblk_t newblock)
3192 3193 3194
{
	int ret = 0;
	int err = 0;
3195
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3196 3197 3198

	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
		  "block %llu, max_blocks %u, flags %d, allocated %u",
3199
		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3200 3201 3202
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

3203
	/* get_block() before submit the IO, split the extent */
3204
	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3205 3206
		ret = ext4_split_unwritten_extents(handle, inode, map,
						   path, flags);
3207 3208 3209 3210 3211
		/*
		 * Flag the inode(non aio case) or end_io struct (aio case)
		 * that this IO needs to convertion to written when IO is
		 * completed
		 */
3212
		if (io)
3213
			io->flag = EXT4_IO_UNWRITTEN;
3214
		else
3215
			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3216
		if (ext4_should_dioread_nolock(inode))
3217
			map->m_flags |= EXT4_MAP_UNINIT;
3218 3219
		goto out;
	}
3220
	/* IO end_io complete, convert the filled extent to written */
3221
	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3222
		ret = ext4_convert_unwritten_extents_endio(handle, inode,
3223
							path);
3224 3225
		if (ret >= 0)
			ext4_update_inode_fsync_trans(handle, inode, 1);
3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244
		goto out2;
	}
	/* buffered IO case */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
		goto map_out;

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already.  We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us.  But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
3245
		map->m_flags |= EXT4_MAP_UNWRITTEN;
3246 3247 3248 3249
		goto out1;
	}

	/* buffered write, writepage time, convert*/
3250
	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3251 3252
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
3253 3254 3255 3256 3257 3258
out:
	if (ret <= 0) {
		err = ret;
		goto out2;
	} else
		allocated = ret;
3259
	map->m_flags |= EXT4_MAP_NEW;
3260 3261 3262 3263 3264 3265 3266
	/*
	 * if we allocated more blocks than requested
	 * we need to make sure we unmap the extra block
	 * allocated. The actual needed block will get
	 * unmapped later when we find the buffer_head marked
	 * new.
	 */
3267
	if (allocated > map->m_len) {
3268
		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3269 3270 3271
					newblock + map->m_len,
					allocated - map->m_len);
		allocated = map->m_len;
3272
	}
3273 3274 3275 3276 3277 3278 3279 3280

	/*
	 * If we have done fallocate with the offset that is already
	 * delayed allocated, we would have block reservation
	 * and quota reservation done in the delayed write path.
	 * But fallocate would have already updated quota and block
	 * count for this offset. So cancel these reservation
	 */
3281
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3282 3283
		ext4_da_update_reserve_space(inode, allocated, 0);

3284
map_out:
3285
	map->m_flags |= EXT4_MAP_MAPPED;
3286
out1:
3287 3288
	if (allocated > map->m_len)
		allocated = map->m_len;
3289
	ext4_ext_show_leaf(inode, path);
3290 3291
	map->m_pblk = newblock;
	map->m_len = allocated;
3292 3293 3294 3295 3296 3297 3298
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}
3299
/*
3300 3301 3302
 * Block allocation/map/preallocation routine for extents based files
 *
 *
3303
 * Need to be called with
3304 3305
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3306 3307 3308 3309 3310 3311 3312 3313 3314 3315
 *
 * return > 0, number of of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *          	buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain look up failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
3316
 */
3317 3318
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map, int flags)
A
Alex Tomas 已提交
3319 3320
{
	struct ext4_ext_path *path = NULL;
3321
	struct ext4_extent_header *eh;
3322
	struct ext4_extent newex, *ex, *last_ex;
3323
	ext4_fsblk_t newblock;
3324
	int i, err = 0, depth, ret, cache_type;
3325
	unsigned int allocated = 0;
3326
	struct ext4_allocation_request ar;
3327
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
A
Alex Tomas 已提交
3328

3329
	ext_debug("blocks %u/%u requested for inode %lu\n",
3330
		  map->m_lblk, map->m_len, inode->i_ino);
A
Alex Tomas 已提交
3331 3332

	/* check in cache */
3333
	cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
3334 3335
	if (cache_type) {
		if (cache_type == EXT4_EXT_CACHE_GAP) {
3336
			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3337 3338 3339 3340
				/*
				 * block isn't allocated yet and
				 * user doesn't want to allocate it
				 */
A
Alex Tomas 已提交
3341 3342 3343
				goto out2;
			}
			/* we should allocate requested block */
3344
		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
A
Alex Tomas 已提交
3345
			/* block is already allocated */
3346
			newblock = map->m_lblk
D
Dave Kleikamp 已提交
3347 3348
				   - le32_to_cpu(newex.ee_block)
				   + ext_pblock(&newex);
3349
			/* number of remaining blocks in the extent */
3350
			allocated = ext4_ext_get_actual_len(&newex) -
3351
				(map->m_lblk - le32_to_cpu(newex.ee_block));
A
Alex Tomas 已提交
3352 3353 3354 3355 3356 3357 3358
			goto out;
		} else {
			BUG();
		}
	}

	/* find extent for this block */
3359
	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
A
Alex Tomas 已提交
3360 3361 3362 3363 3364 3365 3366 3367 3368
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
3369 3370
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
A
Alex Tomas 已提交
3371 3372
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
3373 3374
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode, "bad extent address "
3375 3376 3377
				 "lblock: %lu, depth: %d pblock %lld",
				 (unsigned long) map->m_lblk, depth,
				 path[depth].p_block);
3378 3379 3380
		err = -EIO;
		goto out2;
	}
3381
	eh = path[depth].p_hdr;
A
Alex Tomas 已提交
3382

3383 3384
	ex = path[depth].p_ext;
	if (ex) {
A
Aneesh Kumar K.V 已提交
3385
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3386
		ext4_fsblk_t ee_start = ext_pblock(ex);
A
Amit Arora 已提交
3387
		unsigned short ee_len;
3388 3389 3390

		/*
		 * Uninitialized extents are treated as holes, except that
3391
		 * we split out initialized portions during a write.
3392
		 */
A
Amit Arora 已提交
3393
		ee_len = ext4_ext_get_actual_len(ex);
3394
		/* if found extent covers block, simply return it */
3395 3396
		if (in_range(map->m_lblk, ee_block, ee_len)) {
			newblock = map->m_lblk - ee_block + ee_start;
3397
			/* number of remaining blocks in the extent */
3398 3399 3400
			allocated = ee_len - (map->m_lblk - ee_block);
			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
				  ee_block, ee_len, newblock);
3401

A
Amit Arora 已提交
3402
			/* Do not put uninitialized extent in the cache */
3403
			if (!ext4_ext_is_uninitialized(ex)) {
A
Amit Arora 已提交
3404 3405 3406
				ext4_ext_put_in_cache(inode, ee_block,
							ee_len, ee_start,
							EXT4_EXT_CACHE_EXTENT);
3407 3408
				goto out;
			}
3409
			ret = ext4_ext_handle_uninitialized_extents(handle,
3410 3411
					inode, map, path, flags, allocated,
					newblock);
3412
			return ret;
A
Alex Tomas 已提交
3413 3414 3415 3416
		}
	}

	/*
3417
	 * requested block isn't allocated yet;
A
Alex Tomas 已提交
3418 3419
	 * we couldn't try to create block if create flag is zero
	 */
3420
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3421 3422 3423 3424
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
3425
		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
A
Alex Tomas 已提交
3426 3427 3428
		goto out2;
	}
	/*
3429
	 * Okay, we need to do block allocation.
A
Andrew Morton 已提交
3430
	 */
A
Alex Tomas 已提交
3431

3432
	/* find neighbour allocated blocks */
3433
	ar.lleft = map->m_lblk;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out2;
	ar.lright = map->m_lblk;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
	if (err)
		goto out2;

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
	 * EXT_UNINIT_MAX_LEN.
	 */
	if (map->m_len > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		map->m_len = EXT_INIT_MAX_LEN;
	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		map->m_len = EXT_UNINIT_MAX_LEN;
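	/*
	 * Background for the clamp above: ee_len is a 16-bit field whose
	 * high bit is used to mark an extent uninitialized, so an
	 * initialized extent can cover up to 2^15 = 32768 blocks
	 * (EXT_INIT_MAX_LEN) while an uninitialized one is limited to
	 * 32767 (EXT_UNINIT_MAX_LEN).
	 */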

	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
	newex.ee_block = cpu_to_le32(map->m_lblk);
	newex.ee_len = cpu_to_le16(map->m_len);
	err = ext4_ext_check_overlap(inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = map->m_len;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
	ar.logical = map->m_lblk;
	ar.len = allocated;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);

	/* try to insert new extent into found leaf and return */
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark uninitialized */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
		ext4_ext_mark_uninitialized(&newex);
		/*
		 * An io_end structure is created for every IO write to an
		 * uninitialized extent. To avoid unnecessary conversion,
		 * here we flag the IO that really needs the conversion.
		 * For the non-async direct IO case, flag the inode state
		 * that we need to perform conversion when IO is done.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
			if (io)
				io->flag = EXT4_IO_UNWRITTEN;
			else
				ext4_set_inode_state(inode,
						     EXT4_STATE_DIO_UNWRITTEN);
		}
		if (ext4_should_dioread_nolock(inode))
			map->m_flags |= EXT4_MAP_UNINIT;
	}

	if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) {
		if (unlikely(!eh->eh_entries)) {
			EXT4_ERROR_INODE(inode,
					 "eh->eh_entries == 0 and "
					 "EOFBLOCKS_FL set");
			err = -EIO;
			goto out2;
		}
		last_ex = EXT_LAST_EXTENT(eh);
		/*
		 * If the current leaf block was reached by looking at
		 * the last index block all the way down the tree, and
		 * we are extending the inode beyond the last extent
		 * in the current leaf block, then clear the
		 * EOFBLOCKS_FL flag.
		 */
		for (i = depth-1; i >= 0; i--) {
			if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
				break;
		}
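		/*
		 * Example of the walk above (shape is made up): with
		 * depth 2, if path[1].p_idx and path[0].p_idx both point
		 * at the last index in their blocks, the loop runs i down
		 * to -1, meaning this leaf is the rightmost in the tree.
		 */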
		if ((i < 0) &&
		    (map->m_lblk + ar.len > le32_to_cpu(last_ex->ee_block) +
		     ext4_ext_get_actual_len(last_ex)))
			ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	}
	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err) {
		/* free data blocks we just allocated */
		/* not a good idea to call discard here directly,
		 * but otherwise we'd need to call it on every free() */
		ext4_discard_preallocations(inode);
		ext4_free_blocks(handle, inode, 0, ext_pblock(&newex),
				 ext4_ext_get_actual_len(&newex), 0);
		goto out2;
	}

	/* the previous routine could have used the block we allocated */
	newblock = ext_pblock(&newex);
	allocated = ext4_ext_get_actual_len(&newex);
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_flags |= EXT4_MAP_NEW;

	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_da_update_reserve_space(inode, allocated, 1);

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an uninitialized extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
						EXT4_EXT_CACHE_EXTENT);
		ext4_update_inode_fsync_trans(handle, inode, 1);
	} else
		ext4_update_inode_fsync_trans(handle, inode, 0);
out:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}

void ext4_ext_truncate(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * probably the first extent we are going to free will be the
	 * last one in the block
	 */
	err = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle))
		return;

	if (inode->i_size & (sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);

	ext4_discard_preallocations(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
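	/*
	 * Worked example with made-up numbers: i_size = 10000 bytes on a
	 * 4096-byte block size gives last_block = (10000 + 4095) >> 12 = 3,
	 * so blocks 3 and up are removed while blocks 0-2 stay mapped.
	 */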
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous.
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

out_stop:
	up_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}

static void ext4_falloc_update_inode(struct inode *inode,
				int mode, loff_t new_size, int update_ctime)
{
	struct timespec now;

	if (update_ctime) {
		now = current_fs_time(inode->i_sb);
		if (!timespec_equal(&inode->i_ctime, &now))
			inode->i_ctime = now;
	}
	/*
	 * Update only when preallocation was requested beyond
	 * the file size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (new_size > EXT4_I(inode)->i_disksize)
			ext4_update_i_disksize(inode, new_size);
	} else {
		/*
		 * Mark that we allocate beyond EOF so the subsequent truncate
		 * can proceed even if the new size is the same as i_size.
		 */
		if (new_size > i_size_read(inode))
			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	}

}

/*
 * preallocate space for a file. This implements ext4's fallocate inode
 * operation, which gets called from the sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems that do not support the fallocate() system call).
 */
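/*
 * Hypothetical userspace usage (not part of this file): a call such as
 *
 *	fallocate(fd, 0, 0, 16 << 20);
 *
 * on an extent-mapped ext4 file ends up here and preallocates 16 MiB as
 * uninitialized extents, updating i_size because FALLOC_FL_KEEP_SIZE is
 * not set.
 */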
long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
{
	handle_t *handle;
	loff_t new_size;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	/*
	 * currently supporting (pre)allocate mode for extent-based
	 * files _only_
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EOPNOTSUPP;

	/* preallocation to directories is currently not supported */
	if (S_ISDIR(inode->i_mode))
		return -ENODEV;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because len need not be
	 * block aligned: e.g. with blocksize = 4096, offset = 3072 and
	 * len = 2048 the request still spans two blocks.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
		- map.m_lblk;
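	/*
	 * Continuing the example above with blkbits = 12:
	 * map.m_lblk = 3072 >> 12 = 0 and
	 * max_blocks = (EXT4_BLOCK_ALIGN(2048 + 3072, 12) >> 12) - 0 = 2.
	 */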
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, (len + offset));
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		return ret;
	}
retry:
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk = map.m_lblk + ret;
		map.m_len = max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
		if (ret <= 0) {
#ifdef EXT4FS_DEBUG
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_map_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, map.m_lblk, max_blocks);
#endif
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
						blkbits) >> blkbits))
			new_size = offset + len;
		else
			new_size = ((loff_t) map.m_lblk + ret) << blkbits;

		ext4_falloc_update_inode(inode, mode, new_size,
					 (map.m_flags & EXT4_MAP_NEW));
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
			ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}
	mutex_unlock(&inode->i_mutex);
	return ret > 0 ? ret2 : ret;
}

/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size;
 * all unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io call back
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
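/*
 * Hypothetical caller sketch (names assumed, not defined in this file):
 * a DIO completion handler that saw EXT4_IO_UNWRITTEN set earlier would
 * do something like
 *
 *	ext4_convert_unwritten_extents(io_end->inode, io_end->offset,
 *				       io_end->size);
 *
 * before declaring the io_end finished.
 */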
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
				    ssize_t len)
{
	handle_t *handle;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because len need not be
	 * block aligned; see the comment in ext4_fallocate() above.
	 */
	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
		      map.m_lblk);
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0) {
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_map_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, map.m_lblk, map.m_len);
		}
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	return ret > 0 ? ret2 : ret;
}
/*
 * Callback function called for each extent to gather FIEMAP information.
 */
static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
		       void *data)
{
	struct fiemap_extent_info *fieinfo = data;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
	__u64	logical;
	__u64	physical;
	__u64	length;
	__u32	flags = 0;
	int	error;

	logical =  (__u64)newex->ec_block << blksize_bits;

	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
		pgoff_t offset;
		struct page *page;
		struct buffer_head *bh = NULL;

		offset = logical >> PAGE_SHIFT;
		page = find_get_page(inode->i_mapping, offset);
		if (!page || !page_has_buffers(page))
			return EXT_CONTINUE;

		bh = page_buffers(page);

		if (!bh)
			return EXT_CONTINUE;

		if (buffer_delay(bh)) {
			flags |= FIEMAP_EXTENT_DELALLOC;
			page_cache_release(page);
		} else {
			page_cache_release(page);
			return EXT_CONTINUE;
		}
	}

	physical = (__u64)newex->ec_start << blksize_bits;
	length =   (__u64)newex->ec_len << blksize_bits;

	if (ex && ext4_ext_is_uninitialized(ex))
		flags |= FIEMAP_EXTENT_UNWRITTEN;

	/*
	 * If this extent reaches EXT_MAX_BLOCK, it must be last.
	 *
	 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
	 * this also indicates no more allocated blocks.
	 *
	 * XXX this might miss a single-block extent at EXT_MAX_BLOCK
	 */
	if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
	    newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
		loff_t size = i_size_read(inode);
		loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);

		flags |= FIEMAP_EXTENT_LAST;
		if ((flags & FIEMAP_EXTENT_DELALLOC) &&
		    logical+length > size)
			length = (size - logical + bs - 1) & ~(bs-1);
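		/*
		 * Worked example with made-up numbers: size = 10000,
		 * logical = 8192, bs = 4096 gives
		 * length = (1808 + 4095) & ~4095 = 4096, i.e. the
		 * delalloc tail is rounded up to one full block.
		 */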
	}

	error = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, flags);
	if (error < 0)
		return error;
	if (error == 1)
		return EXT_BREAK;

	return EXT_CONTINUE;
}

/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
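/*
 * Hypothetical userspace sketch (not part of this file): the code below
 * is reached through the FIEMAP ioctl, e.g.
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   8 * sizeof(struct fiemap_extent));
 *	fm->fm_length = ~0ULL;
 *	fm->fm_extent_count = 8;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 */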

static int ext4_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}

int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	/* fallback to generic here if not in extents fmt */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCK)
			last_blk = EXT_MAX_BLOCK-1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information.
		 * ext4_ext_fiemap_cb will push extents back to user.
		 */
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					  ext4_ext_fiemap_cb, fieinfo);
	}

	return error;
}