/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"


/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}
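
/*
 * Editor's note (not in the original source): ee_start_hi:ee_start_lo
 * together hold a 48-bit physical block number.  The shift is written
 * as "<< 31 << 1" rather than "<< 32", presumably so the expression
 * stays well defined even where the operand is only 32 bits wide.
 * Sketch of the reassembly:
 *
 *	hi = 0x0001, lo = 0x00000002
 *	block = 0x2 | ((0x1 << 31) << 1) = 0x100000002
 */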

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
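
/*
 * Hypothetical round-trip sketch (editor's addition, not in the
 * original file): storing a physical block number and reading it back
 * must be lossless for any value that fits in 48 bits:
 *
 *	struct ext4_extent ex;
 *	ext4_ext_store_pblock(&ex, 0x123456789aULL);
 *	BUG_ON(ext_pblock(&ex) != 0x123456789aULL);
 */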

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	/*
	 * We have dropped i_data_sem so someone might have cached again
	 * an extent we are going to truncate.
	 */
	ext4_ext_invalidate_cache(inode);

	return err;
}
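
/*
 * Editor's note: the handle is restarted only when it is running low on
 * journal credits; a journal-less handle (!ext4_handle_valid()) or one
 * with more than @needed credits returns 0 and truncation continues in
 * the current transaction.
 */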

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block 
		 * group for directories and special files.  Regular 
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves 
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = (block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}
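
/*
 * Editor's note: the "colour" above is a simple heuristic that is
 * assumed to spread concurrent allocators across a block group; e.g.
 * with 32768 blocks per group (4 KiB blocks), a task with pid % 16 == 3
 * starts 3 * (32768 / 16) = 6144 blocks past bg_start.
 */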

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}
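
/*
 * Worked example (editor's addition, assuming 4 KiB blocks and the
 * usual 12-byte header and entry sizes): a full tree block holds
 * (4096 - 12) / 12 = 340 extents or indexes, while the 60-byte i_data
 * root holds only (60 - 12) / 12 = 4 entries.
 */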

/*
 * Calculate the number of metadata blocks needed
 * to allocate the delayed block at @lblock.
 * Worst case is one block per extent.
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
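
/*
 * Worked example (editor's addition, assuming 4 KiB blocks, so
 * idxs = (4096 - 12) / 12 = 340): a contiguous delayed-allocation run
 * charges one extra index block at every 340th block, another at every
 * 340^2th, and so on; a non-contiguous lblock falls back to the worst
 * case of ext_depth(inode) + 1 metadata blocks.
 */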

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, struct inode *inode,
					struct ext4_extent_header *eh,
					int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
			"bad header/extent in inode #%lu: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
			    idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
		  if (k != 0 &&
		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
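
/*
 * Example (editor's addition): with index entries at logical blocks
 * {0, 100, 200}, a search for block 150 leaves path->p_idx pointing at
 * the entry for 100 -- the last index with ei_block <= the target,
 * i.e. the subtree that may contain it.
 */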

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
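
/*
 * Usage sketch (editor's addition): callers in this file typically do
 *
 *	path = ext4_ext_find_extent(inode, block, NULL);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 *
 * where path[0] describes the root in i_data and path[depth] the leaf.
 */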

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, 0, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate e_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	BUG_ON(path == NULL);
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
		}
		return 0;
	}

	BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	BUG_ON(path == NULL);
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext_pblock(ex);
	put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	BUG_ON(ex == NULL);
	BUG_ON(eh == NULL);

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
		return 1;
	return 0;
}
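
/*
 * Example (editor's addition, ignoring AGGRESSIVE_TEST): extents
 * [lblk 0, len 8, pblk 100] and [lblk 8, len 4, pblk 108] are logically
 * and physically contiguous, both initialized, and 8 + 4 is well under
 * EXT_INIT_MAX_LEN (32768), so they may be merged; a mixed
 * initialized/uninitialized pair never is.
 */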

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
			   "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
	}

	return merge_done;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
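
/*
 * Example (editor's addition): if newext covers logical blocks
 * [100, 120) and an existing extent starts at block 110, ee_len is
 * trimmed to 110 - 100 = 10 and 1 is returned, so the caller allocates
 * only the non-overlapping head.
 */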

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;

	BUG_ON(ext4_ext_get_actual_len(newext) == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				le32_to_cpu(ex->ee_block),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isnt full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
		ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}

int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			ext4_lblk_t num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
1780
		down_read(&EXT4_I(inode)->i_data_sem);
1781
		path = ext4_ext_find_extent(inode, block, path);
1782
		up_read(&EXT4_I(inode)->i_data_sem);
1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		BUG_ON(path[depth].p_hdr == NULL);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}

static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start, int type)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}

static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;
	int ret = EXT4_EXT_CACHE_NO;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		goto errout;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = cex->ec_type;
	}
errout:
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}

/*
 * ext4_ext_rm_idx:
 * removes an index from the index block.
 * It's used in the truncate case only, thus all requests are for
 * the last index in the block only.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	ext4_free_blocks(handle, inode, 0, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return err;
}

/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the maximum number of credits needed to insert
 * an extent into the extent tree.
 * When the actual path is passed, the caller should be calculating the
 * credits under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 *  There is some space in the leaf, so no
			 *  need to account for a leaf block credit.
			 *
			 *  bitmaps and block group descriptor blocks
			 *  and other metadata blocks still need to be
			 *  accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}

/*
 * How many index/leaf blocks need to change/be allocated to modify nrblocks?
 *
 * If nrblocks fit in a single extent (chunk flag is 1), then in the
 * worst case each tree level index/leaf needs to be changed, and if the
 * tree splits due to the insertion of a new extent, the old index/leaf
 * blocks need to be updated too.
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int index;
	int depth = ext_depth(inode);

	if (chunk)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}

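/*
 * ext4_remove_blocks:
 * free the data blocks covered by extent @ex in the logical range
 * [from, to].  Only tail removal (a range ending at the last block of
 * the extent) actually frees blocks here; other shapes are logged as
 * strange requests.
 */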
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				ext4_lblk_t from, ext4_lblk_t to)
{
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int flags = EXT4_FREE_BLOCKS_FORGET;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;
#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, 0, start, num, flags);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	} else {
		printk(KERN_INFO "strange request: removal(2) "
				"%u-%u from %u:%u\n",
				from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}

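/*
 * ext4_ext_rm_leaf:
 * remove from the leaf all extents (or extent tails) lying beyond @start,
 * walking the leaf from its last extent backwards and extending the
 * transaction whenever more journal credits are needed.
 */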
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b, block;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	BUG_ON(eh == NULL);

	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		else
			uninitialized = 0;

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			 uninitialized, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			le16_add_cpu(&eh->eh_entries, -1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}

/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partially)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}

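/*
 * ext4_ext_remove_space:
 * free every extent beyond @start and shrink the tree accordingly;
 * the tree is scanned depth-wise from the rightmost leaf, and emptied
 * index blocks are removed on the way back up.
 */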
static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %u\n", start);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from the right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	path[0].p_depth = depth;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, so go to the next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check(inode, ext_block_hdr(bh),
							depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by
				 * ext4_ext_rm_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_journal_stop(handle);

	return err;
}

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
2420
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
A
Alex Tomas 已提交
2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}

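/*
 * ext4_ext_zeroout:
 * write zeroes over the on-disk blocks covered by @ex, using synchronous
 * bios built from ZERO_PAGE and waiting for each one to complete.
 */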
/* FIXME!! we need to try to merge to left or right after zero-out  */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	int ret = -EIO;
	struct bio *bio;
	int blkbits, blocksize;
	sector_t ee_pblock;
	struct completion event;
	unsigned int ee_len, len, done, offset;


	blkbits   = inode->i_blkbits;
	blocksize = inode->i_sb->s_blocksize;
	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext_pblock(ex);

	/* convert ee_pblock to 512 byte sectors */
	ee_pblock = ee_pblock << (blkbits - 9);

	while (ee_len > 0) {

		if (ee_len > BIO_MAX_PAGES)
			len = BIO_MAX_PAGES;
		else
			len = ee_len;

		bio = bio_alloc(GFP_NOIO, len);
		bio->bi_sector = ee_pblock;
		bio->bi_bdev   = inode->i_sb->s_bdev;

		done = 0;
		offset = 0;
		while (done < len) {
			ret = bio_add_page(bio, ZERO_PAGE(0),
							blocksize, offset);
			if (ret != blocksize) {
				/*
				 * We can't add any more pages because of
				 * hardware limitations.  Start a new bio.
				 */
				break;
			}
			done++;
			offset += blocksize;
			if (offset >= PAGE_CACHE_SIZE)
				offset = 0;
		}

		init_completion(&event);
		bio->bi_private = &event;
		bio->bi_end_io = bi_complete;
		submit_bio(WRITE, bio);
		wait_for_completion(&event);

		if (test_bit(BIO_UPTODATE, &bio->bi_flags))
			ret = 0;
		else {
			ret = -EIO;
			break;
		}
		bio_put(bio);
		ee_len    -= done;
		ee_pblock += done  << (blkbits - 9);
	}
	return ret;
}

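/*
 * Extents of up to roughly this many blocks are zeroed out on write
 * instead of being split; the assumption is that writing a few blocks
 * of zeroes is cheaper than splitting the extent and growing the tree.
 */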
#define EXT4_EXT_ZERO_LEN 7
/*
 * This function is called by ext4_ext_get_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in middle of the extent
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
						struct inode *inode,
						struct ext4_ext_path *path,
						ext4_lblk_t iblock,
						unsigned int max_blocks)
{
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
	ext4_lblk_t ee_block;
	unsigned int allocated, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;
	int ret = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (iblock - ee_block);
	newblock = iblock - ee_block + ext_pblock(ex);
	ex2 = ex;
	orig_ex.ee_block = ex->ee_block;
	orig_ex.ee_len   = cpu_to_le16(ee_len);
	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* If extent has less than 2*EXT4_EXT_ZERO_LEN blocks, zero out directly */
	if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
		err = ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zeroed the full extent */
		return allocated;
	}

	/* ex1: ee_block to iblock - 1 : uninitialized */
	if (iblock > ee_block) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
	if (!ex1 && allocated > max_blocks)
		ex2->ee_len = cpu_to_le16(max_blocks);
	/* ex3: to ee_block + ee_len : uninitialised */
	if (allocated > max_blocks) {
		unsigned int newdepth;
		/* If extent has less than EXT4_EXT_ZERO_LEN blocks, zero out directly */
		if (allocated <= EXT4_EXT_ZERO_LEN) {
			/*
			 * iblock == ee_block is handled by the zeroout
			 * at the beginning.
			 * Mark first half uninitialized.
			 * Mark second half initialized and zero out the
			 * initialized extent
			 */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = cpu_to_le16(ee_len - allocated);
			ext4_ext_mark_uninitialized(ex);
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);

			ex3 = &newex;
			ex3->ee_block = cpu_to_le32(iblock);
			ext4_ext_store_pblock(ex3, newblock);
			ex3->ee_len = cpu_to_le16(allocated);
			err = ext4_ext_insert_extent(handle, inode, path,
							ex3, 0);
			if (err == -ENOSPC) {
				err = ext4_ext_zeroout(inode, &orig_ex);
				if (err)
					goto fix_extent_len;
				ex->ee_block = orig_ex.ee_block;
				ex->ee_len   = orig_ex.ee_len;
				ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
				ext4_ext_dirty(handle, inode, path + depth);
				/* blocks available from iblock */
				return allocated;

			} else if (err)
				goto fix_extent_len;

			/*
			 * We need to zero out the second half because
			 * an fallocate request can update file size and
			 * converting the second half to initialized extent
			 * implies that we can leak some junk data to user
			 * space.
			 */
			err = ext4_ext_zeroout(inode, ex3);
			if (err) {
				/*
				 * We should actually mark the
				 * second half as uninit and return error
				 * Insert would have changed the extent
				 */
				depth = ext_depth(inode);
				ext4_ext_drop_refs(path);
				path = ext4_ext_find_extent(inode,
								iblock, path);
				if (IS_ERR(path)) {
					err = PTR_ERR(path);
					return err;
				}
				/* get the second half extent details */
				ex = path[depth].p_ext;
				err = ext4_ext_get_access(handle, inode,
								path + depth);
				if (err)
					return err;
				ext4_ext_mark_uninitialized(ex);
				ext4_ext_dirty(handle, inode, path + depth);
				return err;
			}

			/* zeroed the second half */
			return allocated;
		}
		ex3 = &newex;
		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
		ext4_ext_store_pblock(ex3, newblock + max_blocks);
		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
		ext4_ext_mark_uninitialized(ex3);
		err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
		if (err == -ENOSPC) {
			err = ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zeroed the full extent */
			/* blocks available from iblock */
			return allocated;

		} else if (err)
			goto fix_extent_len;
		/*
		 * The depth, and hence eh & ex might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
		/*
		 * update the extent length after successful insert of the
		 * split extent
		 */
		orig_ex.ee_len = cpu_to_le16(ee_len -
						ext4_ext_get_actual_len(ex3));
		depth = newdepth;
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode, iblock, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}
		eh = path[depth].p_hdr;
		ex = path[depth].p_ext;
		if (ex2 != &newex)
			ex2 = ex;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		allocated = max_blocks;

		/* If extent has less than EXT4_EXT_ZERO_LEN blocks and we are
		 * trying to insert an extent in the middle, zero out directly,
		 * otherwise give the extent a chance to merge to the left
		 */
		if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
							iblock != ee_block) {
			err = ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zero out the first half */
			/* blocks available from iblock */
			return allocated;
		}
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/* ex2: iblock to iblock + maxblocks-1 : initialised */
	ex2->ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	if (ex2 != ex)
		goto insert;
	/*
	 * New (initialized) extent starts from the first block
	 * in the current extent. i.e., ex2 == ex
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex2 > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex2 - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex2--;
		}
	}
	/*
	 * Try to Merge towards right. This might be required
	 * only when the whole extent is being written to.
	 * i.e. ex2 == ex and ex3 == NULL.
	 */
	if (!ex3) {
		ret = ext4_ext_try_to_merge(inode, path, ex2);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
		}
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
	if (err == -ENOSPC) {
		err = ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zero out the first half */
		return allocated;
	} else if (err)
		goto fix_extent_len;
out:
	ext4_ext_show_leaf(inode, path);
	return err ? err : allocated;

fix_extent_len:
	ex->ee_block = orig_ex.ee_block;
	ex->ee_len   = orig_ex.ee_len;
	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
	ext4_ext_mark_uninitialized(ex);
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}

/*
 * This function is called by ext4_ext_get_blocks() from
 * ext4_get_blocks_dio_write() when DIO is used to write
 * to an uninitialized extent.
 *
 * Writing to an uninitialized extent may result in splitting the
 * uninitialized extent into multiple initialized/uninitialized extents
 * (up to three). There are three possibilities:
 *   a> There is no split required: Entire extent should be uninitialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in middle of the extent
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the uninitialized extent is split. To prevent ENOSPC occurring at IO
 * completion, we need to split the uninitialized extent before DIO submits
 * the IO. The uninitialized extent will be split into (at most) three
 * uninitialized extents. After IO completes, the part being filled will be
 * converted to initialized by the end_io callback function
 * via ext4_convert_unwritten_extents().
 *
 * Returns the size of the uninitialized extent to be written on success.
 */
 */
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_ext_path *path,
					ext4_lblk_t iblock,
					unsigned int max_blocks,
					int flags)
{
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
	ext4_lblk_t ee_block;
	unsigned int allocated, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;

	ext_debug("ext4_split_unwritten_extents: inode %lu,"
		  "iblock %llu, max_blocks %u\n", inode->i_ino,
		  (unsigned long long)iblock, max_blocks);
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (iblock - ee_block);
	newblock = iblock - ee_block + ext_pblock(ex);
	ex2 = ex;
	orig_ex.ee_block = ex->ee_block;
	orig_ex.ee_len   = cpu_to_le16(ee_len);
	ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));

	/*
	 * If the uninitialized extent begins at the same logical
	 * block where the write begins, and the write completely
	 * covers the extent, then we don't need to split it.
	 */
	if ((iblock == ee_block) && (allocated <= max_blocks))
		return allocated;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* ex1: ee_block to iblock - 1 : uninitialized */
	if (iblock > ee_block) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
	if (!ex1 && allocated > max_blocks)
		ex2->ee_len = cpu_to_le16(max_blocks);
	/* ex3: to ee_block + ee_len : uninitialised */
	if (allocated > max_blocks) {
		unsigned int newdepth;
		ex3 = &newex;
		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
		ext4_ext_store_pblock(ex3, newblock + max_blocks);
		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
		ext4_ext_mark_uninitialized(ex3);
		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
		if (err == -ENOSPC) {
			err =  ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zeroed the full extent */
			/* blocks available from iblock */
			return allocated;

		} else if (err)
			goto fix_extent_len;
		/*
		 * The depth, and hence eh & ex might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
		/*
		 * update the extent length after successful insert of the
		 * split extent
		 */
		orig_ex.ee_len = cpu_to_le16(ee_len -
						ext4_ext_get_actual_len(ex3));
		depth = newdepth;
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode, iblock, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}
		eh = path[depth].p_hdr;
		ex = path[depth].p_ext;
		if (ex2 != &newex)
			ex2 = ex;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		allocated = max_blocks;
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
	 * uninitialised still.
	 */
	ex2->ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	ext4_ext_mark_uninitialized(ex2);
	if (ex2 != ex)
		goto insert;
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	ext_debug("out here\n");
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err == -ENOSPC) {
		err =  ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zero out the first half */
		return allocated;
	} else if (err)
		goto fix_extent_len;
out:
	ext4_ext_show_leaf(inode, path);
	return err ? err : allocated;

fix_extent_len:
	ex->ee_block = orig_ex.ee_block;
	ex->ee_len   = orig_ex.ee_len;
	ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
	ext4_ext_mark_uninitialized(ex);
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}
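
/*
 * ext4_convert_unwritten_extents_dio:
 * after direct IO has filled an uninitialized extent, mark it initialized
 * and try to merge it with its neighbours on both sides.
 */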
static int ext4_convert_unwritten_extents_dio(handle_t *handle,
					      struct inode *inode,
					      struct ext4_ext_path *path)
{
	struct ext4_extent *ex;
	struct ext4_extent_header *eh;
	int depth;
	int err = 0;
	int ret = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/*
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex--;
		}
	}
	/*
	 * Try to Merge towards right.
	 */
	ret = ext4_ext_try_to_merge(inode, path, ex);
	if (ret) {
		err = ext4_ext_correct_indexes(handle, inode, path);
		if (err)
			goto out;
		depth = ext_depth(inode);
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}

static void unmap_underlying_metadata_blocks(struct block_device *bdev,
			sector_t block, int count)
{
	int i;
	for (i = 0; i < count; i++)
		unmap_underlying_metadata(bdev, block + i);
}

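/*
 * ext4_ext_handle_uninitialized_extents:
 * dispatch a mapping request that landed in an uninitialized extent:
 * split it before a DIO write, convert it to written once async DIO
 * completes, convert it to initialized for a buffered write, or return
 * it unwritten for a plain lookup.
 */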
static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
			ext4_lblk_t iblock, unsigned int max_blocks,
			struct ext4_ext_path *path, int flags,
			unsigned int allocated, struct buffer_head *bh_result,
			ext4_fsblk_t newblock)
{
	int ret = 0;
	int err = 0;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
		  "block %llu, max_blocks %u, flags %d, allocated %u",
		  inode->i_ino, (unsigned long long)iblock, max_blocks,
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

	/* DIO get_block() before submitting the IO, split the extent */
	if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
		ret = ext4_split_unwritten_extents(handle,
						inode, path, iblock,
						max_blocks, flags);
		/*
		 * Flag the inode (non-aio case) or end_io struct (aio case)
		 * that this IO needs conversion to written when the IO is
		 * completed
		 */
		if (io)
			io->flag = DIO_AIO_UNWRITTEN;
		else
			EXT4_I(inode)->i_state |= EXT4_STATE_DIO_UNWRITTEN;
		goto out;
	}
	/* async DIO end_io complete, convert the filled extent to written */
	if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
		ret = ext4_convert_unwritten_extents_dio(handle, inode,
							path);
		if (ret >= 0)
			ext4_update_inode_fsync_trans(handle, inode, 1);
		goto out2;
	}
	/* buffered IO case */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
		goto map_out;

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already.  We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us.  But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
		set_buffer_unwritten(bh_result);
		goto out1;
	}

	/* buffered write, writepage time, convert */
	ret = ext4_ext_convert_to_initialized(handle, inode,
						path, iblock,
						max_blocks);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out:
	if (ret <= 0) {
		err = ret;
		goto out2;
	} else
		allocated = ret;
	set_buffer_new(bh_result);
	/*
	 * if we allocated more blocks than requested
	 * we need to make sure we unmap the extra blocks
	 * allocated. The actual needed block will get
	 * unmapped later when we find the buffer_head marked
	 * new.
	 */
	if (allocated > max_blocks) {
		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
					newblock + max_blocks,
					allocated - max_blocks);
		allocated = max_blocks;
	}

	/*
	 * If we have done fallocate with the offset that is already
	 * delayed allocated, we would have block reservation
	 * and quota reservation done in the delayed write path.
	 * But fallocate would have already updated quota and block
	 * count for this offset. So cancel these reservations.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_da_update_reserve_space(inode, allocated, 0);

map_out:
	set_buffer_mapped(bh_result);
out1:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}
/*
 * Block allocation/map/preallocation routine for extents based files
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *          	buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain look up failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
 */
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
			ext4_lblk_t iblock,
			unsigned int max_blocks, struct buffer_head *bh_result,
			int flags)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent newex, *ex;
	ext4_fsblk_t newblock;
	int err = 0, depth, ret, cache_type;
	unsigned int allocated = 0;
	struct ext4_allocation_request ar;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	__clear_bit(BH_New, &bh_result->b_state);
	ext_debug("blocks %u/%u requested for inode %lu\n",
			iblock, max_blocks, inode->i_ino);

	/* check in cache */
	cache_type = ext4_ext_in_cache(inode, iblock, &newex);
	if (cache_type) {
		if (cache_type == EXT4_EXT_CACHE_GAP) {
			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
				/*
				 * block isn't allocated yet and
				 * user doesn't want to allocate it
				 */
				goto out2;
			}
			/* we should allocate requested block */
		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
			/* block is already allocated */
			newblock = iblock
				   - le32_to_cpu(newex.ee_block)
				   + ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = ext4_ext_get_actual_len(&newex) -
					(iblock - le32_to_cpu(newex.ee_block));
			goto out;
		} else {
			BUG();
		}
	}

	/* find extent for this block */
	path = ext4_ext_find_extent(inode, iblock, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
	if (path[depth].p_ext == NULL && depth != 0) {
		ext4_error(inode->i_sb, __func__, "bad extent address "
			   "inode: %lu, iblock: %d, depth: %d",
			   inode->i_ino, iblock, depth);
		err = -EIO;
		goto out2;
	}
	eh = path[depth].p_hdr;

	ex = path[depth].p_ext;
	if (ex) {
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext_pblock(ex);
		unsigned short ee_len;

		/*
		 * Uninitialized extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
		ee_len = ext4_ext_get_actual_len(ex);
		/* if found extent covers block, simply return it */
		if (iblock >= ee_block && iblock < ee_block + ee_len) {
			newblock = iblock - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (iblock - ee_block);
			ext_debug("%u fit into %u:%d -> %llu\n", iblock,
					ee_block, ee_len, newblock);

			/* Do not put uninitialized extent in the cache */
			if (!ext4_ext_is_uninitialized(ex)) {
				ext4_ext_put_in_cache(inode, ee_block,
							ee_len, ee_start,
							EXT4_EXT_CACHE_EXTENT);
				goto out;
			}
			ret = ext4_ext_handle_uninitialized_extents(handle,
					inode, iblock, max_blocks, path,
					flags, allocated, bh_result, newblock);
			return ret;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we can't try to create a block if the create flag is zero
	 */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, path, iblock);
		goto out2;
	}
	/*
	 * Okay, we need to do block allocation.
	 */

	/* find neighbour allocated blocks */
	ar.lleft = iblock;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out2;
	ar.lright = iblock;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
	if (err)
		goto out2;

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
	 * EXT_UNINIT_MAX_LEN.
	 */
	if (max_blocks > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		max_blocks = EXT_INIT_MAX_LEN;
	else if (max_blocks > EXT_UNINIT_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		max_blocks = EXT_UNINIT_MAX_LEN;

	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
	newex.ee_block = cpu_to_le32(iblock);
	newex.ee_len = cpu_to_le16(max_blocks);
	err = ext4_ext_check_overlap(inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = max_blocks;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, iblock);
	ar.logical = iblock;
	ar.len = allocated;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);

	/* try to insert new extent into found leaf and return */
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark uninitialized */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
		ext4_ext_mark_uninitialized(&newex);
		/*
		 * io_end structure was created for every async
		 * direct IO write to the middle of the file.
		 * To avoid unnecessary conversion for every aio dio rewrite
		 * to the middle of the file, here we flag the IO that really
		 * needs the conversion.
		 * For the non-async direct IO case, flag the inode state
		 * that we need to perform conversion when IO is done.
		 */
		if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
			if (io)
				io->flag = DIO_AIO_UNWRITTEN;
			else
				EXT4_I(inode)->i_state |=
					EXT4_STATE_DIO_UNWRITTEN;
		}
	}
	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err) {
		/* free data blocks we just allocated */
		/* not a good idea to call discard here directly,
		 * but otherwise we'd need to call it for every free() */
		ext4_discard_preallocations(inode);
		ext4_free_blocks(handle, inode, 0, ext_pblock(&newex),
				 ext4_ext_get_actual_len(&newex), 0);
		goto out2;
	}

	/* previous routine could use block we allocated */
	newblock = ext_pblock(&newex);
	allocated = ext4_ext_get_actual_len(&newex);
	if (allocated > max_blocks)
		allocated = max_blocks;
	set_buffer_new(bh_result);

	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_da_update_reserve_space(inode, allocated, 1);

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an uninitialized extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
						EXT4_EXT_CACHE_EXTENT);
		ext4_update_inode_fsync_trans(handle, inode, 1);
	} else
		ext4_update_inode_fsync_trans(handle, inode, 0);
out:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	set_buffer_mapped(bh_result);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}

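/*
 * ext4_ext_truncate:
 * truncate an extent-mapped inode to i_size: zero the partial tail block,
 * put the inode on the orphan list for crash safety, and remove all
 * extents beyond the last block under i_data_sem.
 */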
3419
void ext4_ext_truncate(struct inode *inode)
A
Alex Tomas 已提交
3420 3421 3422
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
A
Aneesh Kumar K.V 已提交
3423
	ext4_lblk_t last_block;
A
Alex Tomas 已提交
3424 3425 3426 3427 3428 3429
	handle_t *handle;
	int err = 0;

	/*
	 * probably the first extent we're going to free will be the last
	 * one in the block
	 */
	err = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle))
		return;

	if (inode->i_size & (sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);

	ext4_discard_preallocations(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
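	/*
	 * Worked example of the rounding above (illustrative): with 4K
	 * blocks (EXT4_BLOCK_SIZE_BITS == 12) and i_size == 10000,
	 * last_block = (10000 + 4095) >> 12 == 3, so blocks 0-2 survive
	 * and removal starts at the first block past EOF.
	 */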
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous.
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

out_stop:
	up_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}

static void ext4_falloc_update_inode(struct inode *inode,
				int mode, loff_t new_size, int update_ctime)
{
	struct timespec now;

	if (update_ctime) {
		now = current_fs_time(inode->i_sb);
		if (!timespec_equal(&inode->i_ctime, &now))
			inode->i_ctime = now;
	}
	/*
	 * Update only when preallocation was requested beyond
	 * the file size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (new_size > EXT4_I(inode)->i_disksize)
			ext4_update_i_disksize(inode, new_size);
	}

}
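/*
 * Illustrative example of the update above (a sketch; the fd is
 * hypothetical): fallocate(fd, 0, 0, 1 << 20) on an empty file moves both
 * i_size and i_disksize to 1 MiB, while
 * fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) allocates the blocks but
 * leaves the sizes alone, so stat(2) keeps reporting the old length even
 * though the space is reserved.
 */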

/*
 * preallocate space for a file. This implements ext4's fallocate inode
 * operation, which gets called from the sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support the fallocate() system
 * call).
 */
long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
{
	handle_t *handle;
	ext4_lblk_t block;
	loff_t new_size;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	struct buffer_head map_bh;
	unsigned int credits, blkbits = inode->i_blkbits;

	/*
	 * currently supporting (pre)allocate mode for extent-based
	 * files _only_
	 */
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return -EOPNOTSUPP;

	/* preallocation to directories is currently not supported */
	if (S_ISDIR(inode->i_mode))
		return -ENODEV;

	block = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because the range
	 * need not be block-aligned; e.g. blocksize = 4096, offset = 3072
	 * and len = 2048 cover parts of two blocks.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
							- block;
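	/*
	 * Worked example (illustrative) for the comment above, with
	 * blkbits = 12: block = 3072 >> 12 = 0 and
	 * EXT4_BLOCK_ALIGN(3072 + 2048, 12) >> 12 = 8192 >> 12 = 2,
	 * so max_blocks = 2, whereas a naive len >> blkbits would give 0.
	 */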
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	mutex_lock(&inode->i_mutex);
retry:
	while (ret >= 0 && ret < max_blocks) {
		block = block + ret;
		max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		map_bh.b_state = 0;
		ret = ext4_get_blocks(handle, inode, block,
				      max_blocks, &map_bh,
				      EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
		if (ret <= 0) {
#ifdef EXT4FS_DEBUG
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_get_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u\n", __func__,
				    inode->i_ino, block, max_blocks);
#endif
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
						blkbits) >> blkbits))
			new_size = offset + len;
		else
			new_size = ((loff_t)block + ret) << blkbits;

		ext4_falloc_update_inode(inode, mode, new_size,
						buffer_new(&map_bh));
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
			ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}
	mutex_unlock(&inode->i_mutex);
	return ret > 0 ? ret2 : ret;
}
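/*
 * Illustrative userspace view of this entry point (a sketch; the fd and
 * sizes are hypothetical):
 *
 *	fd = open("/mnt/ext4/file", O_CREAT | O_WRONLY, 0644);
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20);
 *
 * sys_fallocate() resolves the fd and calls this inode operation with the
 * same mode/offset/len; the loop above then allocates uninitialized
 * extents in max_blocks-sized chunks, one transaction per iteration.
 */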

/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size;
 * all unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end_io callback
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
				    ssize_t len)
{
	handle_t *handle;
	ext4_lblk_t block;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct buffer_head map_bh;
	unsigned int credits, blkbits = inode->i_blkbits;

	block = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because the range
	 * need not be block-aligned; e.g. blocksize = 4096, offset = 3072
	 * and len = 2048 cover parts of two blocks.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
							- block;
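	/*
	 * Same rounding as in ext4_fallocate() above: e.g. offset = 3072,
	 * len = 2048 with 4K blocks gives block = 0 and max_blocks = 2.
	 */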
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	while (ret >= 0 && ret < max_blocks) {
		block = block + ret;
		max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		map_bh.b_state = 0;
		ret = ext4_get_blocks(handle, inode, block,
				      max_blocks, &map_bh,
				      EXT4_GET_BLOCKS_DIO_CONVERT_EXT);
		if (ret <= 0) {
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_get_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, block, max_blocks);
		}
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	return ret > 0 ? ret2 : ret;
}
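/*
 * Illustrative caller (a sketch, simplified from the direct IO completion
 * path): when an async write into fallocated space completes, the end_io
 * work converts exactly the range that was written, e.g.
 *
 *	ext4_convert_unwritten_extents(io->inode, io->offset, io->size);
 *
 * so extents outside the written range stay uninitialized and keep
 * reading back as zeroes.
 */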
/*
 * Callback function called for each extent to gather FIEMAP information.
 */
static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
		       void *data)
{
	struct fiemap_extent_info *fieinfo = data;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
	__u64	logical;
	__u64	physical;
	__u64	length;
	__u32	flags = 0;
	int	error;

	logical =  (__u64)newex->ec_block << blksize_bits;

	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
		pgoff_t offset;
		struct page *page;
		struct buffer_head *bh = NULL;

		offset = logical >> PAGE_SHIFT;
		page = find_get_page(inode->i_mapping, offset);
		if (!page)
			return EXT_CONTINUE;
		if (!page_has_buffers(page)) {
			/* drop the reference taken by find_get_page() */
			page_cache_release(page);
			return EXT_CONTINUE;
		}

		bh = page_buffers(page);

		if (buffer_delay(bh)) {
			flags |= FIEMAP_EXTENT_DELALLOC;
			page_cache_release(page);
		} else {
			page_cache_release(page);
			return EXT_CONTINUE;
		}
	}

	physical = (__u64)newex->ec_start << blksize_bits;
	length =   (__u64)newex->ec_len << blksize_bits;

	if (ex && ext4_ext_is_uninitialized(ex))
		flags |= FIEMAP_EXTENT_UNWRITTEN;

	/*
	 * If this extent reaches EXT_MAX_BLOCK, it must be last.
	 *
	 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
	 * this also indicates no more allocated blocks.
	 *
	 * XXX this might miss a single-block extent at EXT_MAX_BLOCK
	 */
	if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
	    newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
		loff_t size = i_size_read(inode);
		loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);

		flags |= FIEMAP_EXTENT_LAST;
		if ((flags & FIEMAP_EXTENT_DELALLOC) &&
		    logical + length > size)
			length = (size - logical + bs - 1) & ~(bs - 1);
	}
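	/*
	 * Worked example of the clamping above (illustrative): size =
	 * 10000, logical = 8192, bs = 4096 gives
	 * length = (10000 - 8192 + 4095) & ~4095 = 4096, so the reported
	 * delalloc extent ends at the block containing EOF.
	 */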

	error = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, flags);
	if (error < 0)
		return error;
	if (error == 1)
		return EXT_BREAK;

	return EXT_CONTINUE;
}

/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}

int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	ext4_lblk_t len_blks;
	int error = 0;

	/* fallback to generic here if not in extents fmt */
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		start_blk = start >> inode->i_sb->s_blocksize_bits;
		len_blks = len >> inode->i_sb->s_blocksize_bits;

		/*
		 * Walk the extent tree gathering extent information.
		 * ext4_ext_fiemap_cb will push extents back to user.
		 */
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					  ext4_ext_fiemap_cb, fieinfo);
	}

	return error;
}
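
/*
 * Illustrative userspace use of this interface (a sketch; error handling
 * omitted and the extent count chosen arbitrarily):
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   8 * sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_flags = FIEMAP_FLAG_SYNC;
 *	fm->fm_extent_count = 8;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * The VFS dispatches the ioctl here; ext4_ext_walk_space() then feeds each
 * extent to ext4_ext_fiemap_cb(), which fills fm->fm_extents.
 */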