/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}
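
/*
 * Worked example (illustrative, not part of the original source): a
 * 48-bit physical block number such as 0x123456789A is stored as
 * ee_start_lo = 0x3456789A and ee_start_hi = 0x0012, and ext_pblock()
 * reassembles it as lo | (hi << 32).  The shift is written as
 * "<< 31) << 1" rather than "<< 32", presumably so the expression
 * stays well defined even on builds where ext4_fsblk_t is a 32-bit
 * type (shifting a value by its full width is undefined in C).
 */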

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	/*
	 * We have dropped i_data_sem so someone might have cached again
	 * an extent we are going to truncate.
	 */
	ext4_ext_invalidate_cache(inode);

	return err;
}
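
/*
 * Note (editorial, based on the jbd2 convention): ext4_journal_extend()
 * returns 0 when the handle was extended in place, a positive value
 * when the running transaction cannot be stretched, and a negative
 * value on error.  Hence the "err <= 0" early return above: only the
 * "could not extend" case falls through to restarting the
 * transaction via ext4_truncate_restart_trans().
 */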

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block 
		 * group for directories and special files.  Regular 
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves 
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}
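
/*
 * Illustration (assuming the common 4KiB block size with 32768 blocks
 * per group): the "colour" offset spreads concurrent allocators
 * across a block group, e.g. a process with current->pid % 16 == 3
 * gets a goal of bg_start + 3 * (32768 / 16) + block, i.e. 6144
 * blocks into the group, reducing contention between processes.
 */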

/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate a new block at @lblock
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
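
/*
 * Worked example (assuming a 4KiB block size): idxs = (4096 - 12) / 12
 * = 340, since both the extent header and an index entry are 12 bytes
 * on disk.  A run of contiguous delayed-allocation blocks therefore
 * needs one new leaf-index block about every 340 blocks, a
 * second-level index block every 340^2 blocks, and so on.
 */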

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, struct inode *inode,
					struct ext4_extent_header *eh,
					int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	__ext4_error(inode->i_sb, function,
			"bad header/extent in inode #%lu: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
			    idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
		  if (k != 0 &&
		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
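
/*
 * Note (illustrative): both binary searches leave the path pointing
 * at the last entry whose starting block is <= the lookup block.  For
 * a leaf holding extents that start at logical blocks 0, 10 and 20, a
 * lookup of block 15 terminates with l just past the entry for 10, so
 * path->p_ext = l - 1 selects the extent starting at 10.
 */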

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}
	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
D
Dave Kleikamp 已提交
852
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we stop processing
	 * and mark the filesystem read-only. The index won't
	 * be inserted and the tree will remain in a
	 * consistent state. The next mount will repair the buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need it to handle errors and to free the
	 * blocks upon failure.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, 0, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate e_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? ix->ei_block : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
				    EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext_pblock(ex);
	put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
A
A

1568 1569 1570
	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge to extents if
1571
	 * this can result in the top bit of ee_len being set.
1572
	 */
1573
	if (ext1_ee_len + ext2_ee_len > max_len)
1574
		return 0;
1575
#ifdef AGGRESSIVE_TEST
1576
	if (ext1_ee_len >= 4)
A
Alex Tomas 已提交
1577 1578 1579
		return 0;
#endif

A
A
	return 0;
}

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			ext4_error(inode->i_sb,
				   "inode#%lu, eh->eh_entries = 0!",
				   inode->i_ino);
	}

	return merge_done;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero on extent logical start block */
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
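
/*
 * Example (illustrative): if newext starts at logical block 100 with
 * length 20 while the next allocated extent starts at block 110, the
 * length is trimmed to 10 so that newext covers blocks 100..109 only,
 * and 1 is returned.
 */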

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
A
	ext4_lblk_t next;
A
A
1700 1701 1702 1703
	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				le32_to_cpu(ex->ee_block),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isnt full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
D
Dave Kleikamp 已提交
1794
			   > le32_to_cpu(nearex->ee_block)) {
A
Alex Tomas 已提交
1795 1796 1797 1798 1799
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
1800
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
A
Alex Tomas 已提交
1801
					"move %d from 0x%p to 0x%p\n",
D
Dave Kleikamp 已提交
1802 1803
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
1804
					ext4_ext_is_uninitialized(newext),
A
Amit Arora 已提交
1805
					ext4_ext_get_actual_len(newext),
A
Alex Tomas 已提交
1806 1807 1808 1809 1810 1811 1812 1813
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
1814
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
A
Alex Tomas 已提交
1815 1816
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
1817
				ext_pblock(newext),
1818
				ext4_ext_is_uninitialized(newext),
A
Amit Arora 已提交
1819
				ext4_ext_get_actual_len(newext),
A
Alex Tomas 已提交
1820 1821 1822 1823 1824
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

M
Marcin Slusarz 已提交
1825
	le16_add_cpu(&eh->eh_entries, 1);
A
Alex Tomas 已提交
1826 1827
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
1828
	ext4_ext_store_pblock(nearex, ext_pblock(newext));
A
Alex Tomas 已提交
1829 1830 1831 1832
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
1833
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1834
		ext4_ext_try_to_merge(inode, path, nearex);
A
Alex Tomas 已提交
1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}
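
/*
 * ext4_ext_walk_space:
 * iterate over the logical range [block, block + num), calling @func once
 * per region with either a real extent (cbex.ec_type ==
 * EXT4_EXT_CACHE_EXTENT) or a hole (EXT4_EXT_CACHE_GAP).  The callback may
 * return EXT_BREAK to stop the walk, EXT_REPEAT to revisit the same range,
 * or a negative error code to abort.
 */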

int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			ext4_lblk_t num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EIO;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
			err = -EIO;
			break;
		}
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}

static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start, int type)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
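/*
 * Illustrative example: with extents covering blocks [0..7] and [20..29],
 * a lookup for block 12 caches the gap [8..19], so later lookups in that
 * hole can be answered without walking the tree again.
 */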
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}

static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;
	int ret = EXT4_EXT_CACHE_NO;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		goto errout;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (in_range(block, cex->ec_block, cex->ec_len)) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = cex->ec_type;
	}
errout:
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EIO;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	ext4_free_blocks(handle, inode, 0, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return err;
}

/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the maximum number of credits needed to insert
 * an extent into the extent tree.
 * When the actual path is passed, the caller should calculate credits
 * under i_data_sem.
 */
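/*
 * Sketch of the accounting below: if the target leaf still has a free
 * slot, only the leaf plus allocation metadata (bitmap, group descriptor,
 * and the blocks counted by EXT4_META_TRANS_BLOCKS) are journalled;
 * otherwise we fall back to ext4_chunk_trans_blocks(), which also covers
 * possible tree splits.
 */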
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 *  There is some space in the leaf, so there is no
			 *  need to account for a leaf block credit.
			 *
			 *  bitmaps and block group descriptor blocks
			 *  and other metadata blocks still need to be
			 *  accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}

/*
 * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
 *
 * If nrblocks fit into a single extent (chunk flag is 1), then in the
 * worst case one index/leaf block at each tree level needs to be changed;
 * if the tree splits due to the insertion of a new extent, the old
 * index/leaf blocks need to be updated as well.
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
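/*
 * For example, reading the formula below with a depth-2 tree: one
 * contiguous chunk is charged index = 2 * 2 = 4 metadata blocks, while
 * discontiguous blocks are charged index = 3 * 2 = 6.
 */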
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int index;
	int depth = ext_depth(inode);

	if (chunk)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}

static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				ext4_lblk_t from, ext4_lblk_t to)
{
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int flags = EXT4_FREE_BLOCKS_FORGET;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;
#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
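	/*
	 * Illustrative example of the tail-removal case below: for an
	 * extent covering logical blocks [100..107] with from == 104 and
	 * to == 107, num = 4 and the last four physical blocks of the
	 * extent are freed.
	 */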
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, 0, start, num, flags);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	} else {
		printk(KERN_INFO "strange request: removal(2) "
				"%u-%u from %u:%u\n",
				from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}

static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b, block;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}
	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		else
			uninitialized = 0;

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			 uninitialized, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			le16_add_cpu(&eh->eh_entries, -1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}

/*
 * ext4_ext_more_to_rm:
 * returns 1 if the current index has to be freed (even partially)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncation at a deeper level happened, it wasn't partial,
	 * so we have to consider the current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}

static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %u\n", start);

	/* probably the first extent we free will be the last in the block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from the right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	path[0].p_depth = depth;
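	/*
	 * Iterative depth-first walk: i is the current tree level and
	 * path[] doubles as an explicit stack, so each leaf is freed
	 * before the index entries that point to it.
	 */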

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here; move on to the next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check(inode, ext_block_hdr(bh),
							depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by the
				 * ext4_ext_rm_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncating to zero freed the whole tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_journal_stop(handle);

	return err;
}

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

/* FIXME!! we need to try to merge to left or right after zero-out  */
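/*
 * ext4_ext_zeroout: write zeroes over every block covered by @ex by
 * packing block-sized chunks of the shared zero page (at most
 * BIO_MAX_PAGES per bio) and waiting synchronously for each bio to
 * complete before returning.
 */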
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	int ret = -EIO;
	struct bio *bio;
	int blkbits, blocksize;
	sector_t ee_pblock;
	struct completion event;
	unsigned int ee_len, len, done, offset;


	blkbits   = inode->i_blkbits;
	blocksize = inode->i_sb->s_blocksize;
	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext_pblock(ex);

	/* convert ee_pblock to 512 byte sectors */
	ee_pblock = ee_pblock << (blkbits - 9);

	while (ee_len > 0) {

		if (ee_len > BIO_MAX_PAGES)
			len = BIO_MAX_PAGES;
		else
			len = ee_len;

		bio = bio_alloc(GFP_NOIO, len);
		bio->bi_sector = ee_pblock;
		bio->bi_bdev   = inode->i_sb->s_bdev;

		done = 0;
		offset = 0;
		while (done < len) {
			ret = bio_add_page(bio, ZERO_PAGE(0),
							blocksize, offset);
			if (ret != blocksize) {
				/*
				 * We can't add any more pages because of
				 * hardware limitations.  Start a new bio.
				 */
				break;
			}
			done++;
			offset += blocksize;
			if (offset >= PAGE_CACHE_SIZE)
				offset = 0;
		}

		init_completion(&event);
		bio->bi_private = &event;
		bio->bi_end_io = bi_complete;
		submit_bio(WRITE, bio);
		wait_for_completion(&event);

		if (test_bit(BIO_UPTODATE, &bio->bi_flags))
			ret = 0;
		else {
			ret = -EIO;
			break;
		}
		bio_put(bio);
		ee_len    -= done;
		ee_pblock += done  << (blkbits - 9);
	}
	return ret;
}

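/*
 * Heuristic threshold: writes into short uninitialized extents (see the
 * EXT4_EXT_ZERO_LEN and 2*EXT4_EXT_ZERO_LEN checks below) are zeroed out
 * in place instead of being split, the idea being that a small
 * synchronous write is cheaper than fragmenting the extent tree.
 */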
#define EXT4_EXT_ZERO_LEN 7
/*
 * This function is called by ext4_ext_get_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 */
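/*
 * Illustrative layout for case c> (middle write), before and after:
 *
 *   before:  |<---------- uninitialized ex ---------->|
 *   after:   |<- ex1 ->|<-- ex2 (written) -->|<- ex3 ->|
 *
 * ex1 and ex3 stay uninitialized; ex2 covers [iblock, iblock+max_blocks).
 */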
static int ext4_ext_convert_to_initialized(handle_t *handle,
						struct inode *inode,
						struct ext4_ext_path *path,
						ext4_lblk_t iblock,
						unsigned int max_blocks)
{
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
	ext4_lblk_t ee_block;
	unsigned int allocated, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;
	int ret = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (iblock - ee_block);
	newblock = iblock - ee_block + ext_pblock(ex);
	ex2 = ex;
	orig_ex.ee_block = ex->ee_block;
	orig_ex.ee_len   = cpu_to_le16(ee_len);
	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* If the extent has less than 2*EXT4_EXT_ZERO_LEN blocks, zero out directly */
	if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
		err =  ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zeroed the full extent */
		return allocated;
	}

	/* ex1: ee_block to iblock - 1 : uninitialized */
	if (iblock > ee_block) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
	if (!ex1 && allocated > max_blocks)
		ex2->ee_len = cpu_to_le16(max_blocks);
	/* ex3: to ee_block + ee_len : uninitialised */
	if (allocated > max_blocks) {
		unsigned int newdepth;
		/* If the extent has less than EXT4_EXT_ZERO_LEN blocks, zero out directly */
		if (allocated <= EXT4_EXT_ZERO_LEN) {
			/*
			 * iblock == ee_block is handled by the zeroout
			 * at the beginning.
			 * Mark first half uninitialized.
			 * Mark second half initialized and zero out the
			 * initialized extent
			 */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = cpu_to_le16(ee_len - allocated);
			ext4_ext_mark_uninitialized(ex);
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);

			ex3 = &newex;
			ex3->ee_block = cpu_to_le32(iblock);
			ext4_ext_store_pblock(ex3, newblock);
			ex3->ee_len = cpu_to_le16(allocated);
2704 2705
			err = ext4_ext_insert_extent(handle, inode, path,
							ex3, 0);
2706 2707 2708 2709 2710 2711 2712 2713
			if (err == -ENOSPC) {
				err =  ext4_ext_zeroout(inode, &orig_ex);
				if (err)
					goto fix_extent_len;
				ex->ee_block = orig_ex.ee_block;
				ex->ee_len   = orig_ex.ee_len;
				ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
				ext4_ext_dirty(handle, inode, path + depth);
2714
				/* blocks available from iblock */
2715
				return allocated;
2716 2717 2718 2719

			} else if (err)
				goto fix_extent_len;

2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741
			/*
			 * We need to zero out the second half because
			 * an fallocate request can update file size and
			 * converting the second half to initialized extent
			 * implies that we can leak some junk data to user
			 * space.
			 */
			err =  ext4_ext_zeroout(inode, ex3);
			if (err) {
				/*
				 * We should actually mark the
				 * second half as uninit and return error
				 * Insert would have changed the extent
				 */
				depth = ext_depth(inode);
				ext4_ext_drop_refs(path);
				path = ext4_ext_find_extent(inode,
								iblock, path);
				if (IS_ERR(path)) {
					err = PTR_ERR(path);
					return err;
				}
2742
				/* get the second half extent details */
2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753
				ex = path[depth].p_ext;
				err = ext4_ext_get_access(handle, inode,
								path + depth);
				if (err)
					return err;
				ext4_ext_mark_uninitialized(ex);
				ext4_ext_dirty(handle, inode, path + depth);
				return err;
			}

			/* zeroed the second half */
2754 2755
			return allocated;
		}
2756 2757 2758 2759 2760
		ex3 = &newex;
		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
		ext4_ext_store_pblock(ex3, newblock + max_blocks);
		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
		ext4_ext_mark_uninitialized(ex3);
2761
		err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
2762 2763 2764 2765 2766
		if (err == -ENOSPC) {
			err =  ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
2767 2768 2769 2770
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
2771
			/* zeroed the full extent */
2772
			/* blocks available from iblock */
2773
			return allocated;
2774 2775 2776

		} else if (err)
			goto fix_extent_len;
2777 2778 2779 2780 2781
		/*
		 * The depth, and hence eh & ex might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
2782
		/*
C
Coly Li 已提交
2783
		 * update the extent length after successful insert of the
2784 2785 2786 2787
		 * split extent
		 */
		orig_ex.ee_len = cpu_to_le16(ee_len -
						ext4_ext_get_actual_len(ex3));
2788 2789 2790 2791 2792 2793
		depth = newdepth;
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode, iblock, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}
		eh = path[depth].p_hdr;
		ex = path[depth].p_ext;
		if (ex2 != &newex)
			ex2 = ex;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		allocated = max_blocks;

		/* If the extent is shorter than EXT4_EXT_ZERO_LEN and we are
		 * trying to insert an extent in the middle, zero it out
		 * directly; otherwise give the extent a chance to merge to
		 * the left.
		 */
		if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
							iblock != ee_block) {
			err =  ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zero out the first half */
			/* blocks available from iblock */
			return allocated;
		}
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/* ex2: iblock to iblock + maxblocks-1 : initialised */
	ex2->ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	if (ex2 != ex)
		goto insert;
	/*
	 * New (initialized) extent starts from the first block
	 * in the current extent. i.e., ex2 == ex
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex2 > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex2 - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex2--;
		}
	}
	/*
	 * Try to Merge towards right. This might be required
	 * only when the whole extent is being written to.
	 * i.e. ex2 == ex and ex3 == NULL.
	 */
	if (!ex3) {
		ret = ext4_ext_try_to_merge(inode, path, ex2);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
		}
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
	if (err == -ENOSPC) {
		err =  ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zero out the first half */
		return allocated;
	} else if (err)
		goto fix_extent_len;
out:
	ext4_ext_show_leaf(inode, path);
	return err ? err : allocated;

fix_extent_len:
	ex->ee_block = orig_ex.ee_block;
	ex->ee_len   = orig_ex.ee_len;
	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
	ext4_ext_mark_uninitialized(ex);
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}

/*
 * This function is called by ext4_ext_get_blocks() from
 * ext4_get_blocks_dio_write() when DIO writes
 * to an uninitialized extent.
 *
 * Writing to an uninitialized extent may result in splitting the
 * uninitialized extent into multiple initialized/uninitialized extents
 * (up to three).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be uninitialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the uninitialized extent is split. To prevent ENOSPC from occurring when
 * the IO completes, we split the uninitialized extent before submitting
 * the DIO. The uninitialized extent will be split into (at most) three
 * uninitialized extents. After the IO completes, the part that was filled
 * will be converted to initialized by the end_io callback
 * via ext4_convert_unwritten_extents().
 *
 * Returns the size of uninitialized extent to be written on success.
 */
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_ext_path *path,
					ext4_lblk_t iblock,
					unsigned int max_blocks,
					int flags)
{
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
	ext4_lblk_t ee_block;
	unsigned int allocated, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;

	ext_debug("ext4_split_unwritten_extents: inode %lu,"
		  "iblock %llu, max_blocks %u\n", inode->i_ino,
		  (unsigned long long)iblock, max_blocks);
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (iblock - ee_block);
	newblock = iblock - ee_block + ext_pblock(ex);
	ex2 = ex;
	orig_ex.ee_block = ex->ee_block;
	orig_ex.ee_len   = cpu_to_le16(ee_len);
	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));

	/*
	 * If the uninitialized extent begins at the same logical
	 * block where the write begins, and the write completely
	 * covers the extent, then we don't need to split it.
	 */
	if ((iblock == ee_block) && (allocated <= max_blocks))
		return allocated;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* ex1: ee_block to iblock - 1 : uninitialized */
	if (iblock > ee_block) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
	if (!ex1 && allocated > max_blocks)
		ex2->ee_len = cpu_to_le16(max_blocks);
	/* ex3: to ee_block + ee_len : uninitialised */
	if (allocated > max_blocks) {
		unsigned int newdepth;
		ex3 = &newex;
		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
		ext4_ext_store_pblock(ex3, newblock + max_blocks);
		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
		ext4_ext_mark_uninitialized(ex3);
		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
		if (err == -ENOSPC) {
			err =  ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zeroed the full extent */
			/* blocks available from iblock */
			return allocated;

		} else if (err)
			goto fix_extent_len;
		/*
		 * The depth, and hence eh & ex might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
		/*
		 * update the extent length after successful insert of the
		 * split extent
		 */
		orig_ex.ee_len = cpu_to_le16(ee_len -
						ext4_ext_get_actual_len(ex3));
		depth = newdepth;
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode, iblock, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}
		eh = path[depth].p_hdr;
		ex = path[depth].p_ext;
		if (ex2 != &newex)
			ex2 = ex;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		allocated = max_blocks;
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
	 * uninitialised still.
	 */
	ex2->ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	ext4_ext_mark_uninitialized(ex2);
	if (ex2 != ex)
		goto insert;
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	ext_debug("out here\n");
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err == -ENOSPC) {
		err =  ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zero out the first half */
		return allocated;
	} else if (err)
		goto fix_extent_len;
out:
	ext4_ext_show_leaf(inode, path);
	return err ? err : allocated;

fix_extent_len:
	ex->ee_block = orig_ex.ee_block;
	ex->ee_len   = orig_ex.ee_len;
	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
	ext4_ext_mark_uninitialized(ex);
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
					      struct inode *inode,
					      struct ext4_ext_path *path)
{
	struct ext4_extent *ex;
	struct ext4_extent_header *eh;
	int depth;
	int err = 0;
	int ret = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/*
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex--;
		}
	}
	/*
	 * Try to Merge towards right.
	 */
	ret = ext4_ext_try_to_merge(inode, path, ex);
	if (ret) {
		err = ext4_ext_correct_indexes(handle, inode, path);
		if (err)
			goto out;
		depth = ext_depth(inode);
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}

static void unmap_underlying_metadata_blocks(struct block_device *bdev,
			sector_t block, int count)
{
	int i;
	for (i = 0; i < count; i++)
		unmap_underlying_metadata(bdev, block + i);
}
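
/*
 * Dispatch summary for writes into an uninitialized extent (see flags):
 * EXT4_GET_BLOCKS_PRE_IO splits the extent before the IO is submitted,
 * EXT4_GET_BLOCKS_CONVERT marks the filled extent as written at IO
 * completion, and the buffered paths either reuse the unwritten extent
 * or convert it in place.
 */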

static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
			ext4_lblk_t iblock, unsigned int max_blocks,
			struct ext4_ext_path *path, int flags,
			unsigned int allocated, struct buffer_head *bh_result,
			ext4_fsblk_t newblock)
{
	int ret = 0;
	int err = 0;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
		  "block %llu, max_blocks %u, flags %d, allocated %u",
		  inode->i_ino, (unsigned long long)iblock, max_blocks,
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

	/* get_block() before submitting the IO: split the extent */
	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
		ret = ext4_split_unwritten_extents(handle,
						inode, path, iblock,
						max_blocks, flags);
		/*
		 * Flag the inode (non-AIO case) or the end_io struct
		 * (AIO case) so that this IO is converted to written
		 * when it completes.
		 */
		if (io)
			io->flag = EXT4_IO_UNWRITTEN;
		else
			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		if (ext4_should_dioread_nolock(inode))
			set_buffer_uninit(bh_result);
		goto out;
	}
	/* IO end_io complete, convert the filled extent to written */
	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
		ret = ext4_convert_unwritten_extents_endio(handle, inode,
							path);
		if (ret >= 0)
			ext4_update_inode_fsync_trans(handle, inode, 1);
		goto out2;
	}
	/* buffered IO case */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
		goto map_out;

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already.  We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us.  But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
		set_buffer_unwritten(bh_result);
		goto out1;
	}

	/* buffered write, writepage time, convert*/
	ret = ext4_ext_convert_to_initialized(handle, inode,
						path, iblock,
						max_blocks);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out:
	if (ret <= 0) {
		err = ret;
		goto out2;
	} else
		allocated = ret;
	set_buffer_new(bh_result);
	/*
	 * if we allocated more blocks than requested
	 * we need to make sure we unmap the extra block
	 * allocated. The actual needed block will get
	 * unmapped later when we find the buffer_head marked
	 * new.
	 */
	if (allocated > max_blocks) {
		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
					newblock + max_blocks,
					allocated - max_blocks);
		allocated = max_blocks;
	}

	/*
	 * If we have done fallocate with an offset that is already delayed
	 * allocated, we would have block and quota reservations from the
	 * delayed write path.  But fallocate would have already updated
	 * the quota and block count for this offset, so cancel those
	 * reservations.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_da_update_reserve_space(inode, allocated, 0);

map_out:
	set_buffer_mapped(bh_result);
out1:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}
/*
 * Block allocation/map/preallocation routine for extents based files
 *
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *          	buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain look up failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
 */
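/*
 * Rough flow of the routine below: check the extent cache, then walk the
 * tree; a hit on an initialized extent is returned directly, a hit on an
 * uninitialized one is handed to the unwritten-extent helpers, and a miss
 * either caches the gap (lookup) or allocates, inserts and caches a new
 * extent (create).
 */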
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
			ext4_lblk_t iblock,
			unsigned int max_blocks, struct buffer_head *bh_result,
			int flags)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent newex, *ex, *last_ex;
	ext4_fsblk_t newblock;
	int err = 0, depth, ret, cache_type;
	unsigned int allocated = 0;
	struct ext4_allocation_request ar;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	__clear_bit(BH_New, &bh_result->b_state);
	ext_debug("blocks %u/%u requested for inode %lu\n",
			iblock, max_blocks, inode->i_ino);

	/* check in cache */
	cache_type = ext4_ext_in_cache(inode, iblock, &newex);
	if (cache_type) {
		if (cache_type == EXT4_EXT_CACHE_GAP) {
			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
				/*
				 * block isn't allocated yet and
				 * user doesn't want to allocate it
				 */
				goto out2;
			}
			/* we should allocate requested block */
		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
			/* block is already allocated */
			newblock = iblock
				   - le32_to_cpu(newex.ee_block)
				   + ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = ext4_ext_get_actual_len(&newex) -
					(iblock - le32_to_cpu(newex.ee_block));
			goto out;
		} else {
			BUG();
		}
	}

	/* find extent for this block */
	path = ext4_ext_find_extent(inode, iblock, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why an assert can't be put in ext4_ext_find_extent()
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode, "bad extent address "
				 "iblock: %d, depth: %d pblock %lld",
				 iblock, depth, path[depth].p_block);
		err = -EIO;
		goto out2;
	}
	eh = path[depth].p_hdr;

	ex = path[depth].p_ext;
	if (ex) {
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext_pblock(ex);
		unsigned short ee_len;

		/*
		 * Uninitialized extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
		ee_len = ext4_ext_get_actual_len(ex);
		/* if found extent covers block, simply return it */
		if (in_range(iblock, ee_block, ee_len)) {
			newblock = iblock - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (iblock - ee_block);
			ext_debug("%u fit into %u:%d -> %llu\n", iblock,
					ee_block, ee_len, newblock);
			/* Do not put uninitialized extent in the cache */
			if (!ext4_ext_is_uninitialized(ex)) {
				ext4_ext_put_in_cache(inode, ee_block,
							ee_len, ee_start,
							EXT4_EXT_CACHE_EXTENT);
				goto out;
			}
			ret = ext4_ext_handle_uninitialized_extents(handle,
					inode, iblock, max_blocks, path,
					flags, allocated, bh_result, newblock);
			return ret;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we can't try to create blocks if the create flag is zero
	 */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, path, iblock);
		goto out2;
	}
	/*
	 * Okay, we need to do block allocation.
	 */

	/* find neighbour allocated blocks */
	ar.lleft = iblock;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out2;
	ar.lright = iblock;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
	if (err)
		goto out2;
	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
	 * EXT_UNINIT_MAX_LEN.
	 */
	if (max_blocks > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		max_blocks = EXT_INIT_MAX_LEN;
	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		max_blocks = EXT_UNINIT_MAX_LEN;

	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
	newex.ee_block = cpu_to_le32(iblock);
	newex.ee_len = cpu_to_le16(max_blocks);
	err = ext4_ext_check_overlap(inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = max_blocks;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, iblock);
	ar.logical = iblock;
	ar.len = allocated;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);

	/* try to insert new extent into found leaf and return */
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark uninitialized */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
		ext4_ext_mark_uninitialized(&newex);
		/*
		 * An io_end structure is created for every IO write to an
		 * uninitialized extent. To avoid unnecessary conversion,
		 * here we flag the IO that really needs the conversion.
		 * For the non-async direct IO case, we set an inode state
		 * flag so that the conversion is performed when IO is done.
		 */
		if (flags & EXT4_GET_BLOCKS_PRE_IO) {
			if (io)
				io->flag = EXT4_IO_UNWRITTEN;
			else
				ext4_set_inode_state(inode,
						     EXT4_STATE_DIO_UNWRITTEN);
		}
		if (ext4_should_dioread_nolock(inode))
			set_buffer_uninit(bh_result);
	}

	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) {
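		/*
		 * EXT4_EOFBLOCKS_FL means blocks were preallocated past EOF.
		 * If this allocation extends beyond the current last extent,
		 * that no longer holds and the flag is cleared below.
		 */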
		if (unlikely(!eh->eh_entries)) {
			EXT4_ERROR_INODE(inode,
					 "eh->eh_entries == 0 ee_block %d",
					 le32_to_cpu(ex->ee_block));
			err = -EIO;
			goto out2;
		}
		last_ex = EXT_LAST_EXTENT(eh);
		if (iblock + ar.len > le32_to_cpu(last_ex->ee_block)
		    + ext4_ext_get_actual_len(last_ex))
			EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL;
	}
	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err) {
		/* free data blocks we just allocated */
		/* not a good idea to call discard here directly,
		 * but otherwise we'd need to call it every free() */
		ext4_discard_preallocations(inode);
		ext4_free_blocks(handle, inode, 0, ext_pblock(&newex),
				 ext4_ext_get_actual_len(&newex), 0);
		goto out2;
	}

	/* previous routine could use block we allocated */
	newblock = ext_pblock(&newex);
	allocated = ext4_ext_get_actual_len(&newex);
	if (allocated > max_blocks)
		allocated = max_blocks;
	set_buffer_new(bh_result);

	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_da_update_reserve_space(inode, allocated, 1);

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an uninitialized extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
						EXT4_EXT_CACHE_EXTENT);
		ext4_update_inode_fsync_trans(handle, inode, 1);
	} else
		ext4_update_inode_fsync_trans(handle, inode, 0);
out:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	set_buffer_mapped(bh_result);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}

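/*
 * ext4_ext_truncate:
 * removes all extents beyond the new i_size, zeroing the tail of the
 * last partial block and using the orphan list so that an interrupted
 * truncate can be completed on recovery
 */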
void ext4_ext_truncate(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * the first extent we free will probably be the last one in its block
	 */
	err = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle))
		return;

	if (inode->i_size & (sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	if (ext4_orphan_add(handle, inode))
		goto out_stop;

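	/* block concurrent block mapping while the extent tree is modified */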
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);

	ext4_discard_preallocations(inode);

	/*
	 * TODO: optimization is possible here.
	 * We probably do not need to scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous.
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

out_stop:
	up_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}

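/*
 * ext4_falloc_update_inode:
 * after a successful allocation, update ctime and either extend
 * i_size/i_disksize (plain fallocate) or set EXT4_EOFBLOCKS_FL when
 * blocks were preallocated beyond EOF with FALLOC_FL_KEEP_SIZE
 */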
static void ext4_falloc_update_inode(struct inode *inode,
				int mode, loff_t new_size, int update_ctime)
{
	struct timespec now;

	if (update_ctime) {
		now = current_fs_time(inode->i_sb);
		if (!timespec_equal(&inode->i_ctime, &now))
			inode->i_ctime = now;
	}
	/*
	 * Update only when preallocation was requested beyond
	 * the file size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (new_size > EXT4_I(inode)->i_disksize)
			ext4_update_i_disksize(inode, new_size);
	} else {
		/*
		 * Mark that we allocate beyond EOF so the subsequent truncate
		 * can proceed even if the new size is the same as i_size.
		 */
		if (new_size > i_size_read(inode))
			EXT4_I(inode)->i_flags |= EXT4_EOFBLOCKS_FL;
	}

}

/*
 * preallocate space for a file. This implements ext4's fallocate inode
 * operation, which is called from the sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support fallocate() system call).
 */
long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
{
	handle_t *handle;
	ext4_lblk_t block;
	loff_t new_size;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	struct buffer_head map_bh;
	unsigned int credits, blkbits = inode->i_blkbits;

	/*
	 * currently supporting (pre)allocate mode for extent-based
	 * files _only_
	 */
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return -EOPNOTSUPP;

	/* preallocation to directories is currently not supported */
	if (S_ISDIR(inode->i_mode))
		return -ENODEV;

	block = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because the range may
	 * start partway into a block: e.g. with blocksize = 4096,
	 * offset = 3072 and len = 2048, the request still spans two blocks.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
							- block;
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	mutex_lock(&inode->i_mutex);
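	/*
	 * Allocate the range a chunk at a time: each iteration maps up to
	 * max_blocks uninitialized blocks under its own transaction, then
	 * advances by the number of blocks actually allocated.
	 */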
retry:
	while (ret >= 0 && ret < max_blocks) {
		block = block + ret;
		max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		map_bh.b_state = 0;
		ret = ext4_get_blocks(handle, inode, block,
				      max_blocks, &map_bh,
				      EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
		if (ret <= 0) {
#ifdef EXT4FS_DEBUG
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_get_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u\n", __func__,
				    inode->i_ino, block, max_blocks);
#endif
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
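		/*
		 * If this chunk reaches the end of the requested range, the
		 * new size is exactly offset + len; otherwise the size
		 * covers only the blocks allocated so far.
		 */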
		if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
						blkbits) >> blkbits))
			new_size = offset + len;
		else
			new_size = (block + ret) << blkbits;

		ext4_falloc_update_inode(inode, mode, new_size,
						buffer_new(&map_bh));
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
			ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}
	mutex_unlock(&inode->i_mutex);
	return ret > 0 ? ret2 : ret;
}

/*
 * This function converts a range of blocks to written extents.
 * The caller passes the start offset and the size; all unwritten
 * extents within this range will be converted to written extents.
 *
 * It is called from the direct IO end_io callback function to
 * convert fallocated extents after the IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
				    ssize_t len)
{
	handle_t *handle;
	ext4_lblk_t block;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct buffer_head map_bh;
	unsigned int credits, blkbits = inode->i_blkbits;

	block = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because the range may
	 * start partway into a block: e.g. with blocksize = 4096,
	 * offset = 3072 and len = 2048, the request still spans two blocks.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
							- block;
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
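	/*
	 * Convert the range one transaction at a time: each pass asks
	 * ext4_get_blocks() to rewrite up to max_blocks unwritten blocks
	 * as written extents, then advances by the number converted.
	 */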
	while (ret >= 0 && ret < max_blocks) {
		block = block + ret;
		max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		map_bh.b_state = 0;
		ret = ext4_get_blocks(handle, inode, block,
				      max_blocks, &map_bh,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0) {
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_get_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u\n", __func__,
				    inode->i_ino, block, max_blocks);
		}
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	return ret > 0 ? ret2 : ret;
}
/*
 * Callback function called for each extent to gather FIEMAP information.
 */
static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
		       void *data)
{
	struct fiemap_extent_info *fieinfo = data;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
	__u64	logical;
	__u64	physical;
	__u64	length;
	__u32	flags = 0;
	int	error;

	logical =  (__u64)newex->ec_block << blksize_bits;

	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
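		/*
		 * A gap in the extent tree may still hold dirty pages
		 * awaiting delayed allocation; check the page cache and
		 * report the range as DELALLOC if its buffers are delayed.
		 */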
		pgoff_t offset;
		struct page *page;
		struct buffer_head *bh = NULL;

		offset = logical >> PAGE_SHIFT;
		page = find_get_page(inode->i_mapping, offset);
		if (!page || !page_has_buffers(page))
			return EXT_CONTINUE;

		bh = page_buffers(page);

		if (!bh)
			return EXT_CONTINUE;

		if (buffer_delay(bh)) {
			flags |= FIEMAP_EXTENT_DELALLOC;
			page_cache_release(page);
		} else {
			page_cache_release(page);
			return EXT_CONTINUE;
		}
	}

	physical = (__u64)newex->ec_start << blksize_bits;
	length =   (__u64)newex->ec_len << blksize_bits;

	if (ex && ext4_ext_is_uninitialized(ex))
		flags |= FIEMAP_EXTENT_UNWRITTEN;

	/*
	 * If this extent reaches EXT_MAX_BLOCK, it must be last.
	 *
	 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
	 * this also indicates no more allocated blocks.
	 *
	 * XXX this might miss a single-block extent at EXT_MAX_BLOCK
	 */
	if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
	    newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
		loff_t size = i_size_read(inode);
		loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);

		flags |= FIEMAP_EXTENT_LAST;
		if ((flags & FIEMAP_EXTENT_DELALLOC) &&
		    logical+length > size)
			length = (size - logical + bs - 1) & ~(bs-1);
	}

	error = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, flags);
	if (error < 0)
		return error;
	if (error == 1)
		return EXT_BREAK;

	return EXT_CONTINUE;
}

/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

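/*
 * ext4_xattr_fiemap:
 * report where an inode's extended attributes live: either the
 * in-inode area past the fixed fields or the external xattr block
 */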
static int ext4_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}

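/*
 * ext4_fiemap:
 * entry point for the FIEMAP ioctl; xattr requests are answered
 * directly, data requests walk the extent tree
 */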
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	/* fallback to generic here if not in extents fmt */
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

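		/* clamp the range to the highest logical block ext4 supports */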
		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCK)
			last_blk = EXT_MAX_BLOCK-1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information.
		 * ext4_ext_fiemap_cb will push extents back to user.
		 */
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					  ext4_ext_fiemap_cb, fieinfo);
	}

	return error;
}