/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}
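
/*
 * Typical calling pattern (sketch for illustration only; real callers in
 * this file add their own error handling):
 *
 *	err = ext4_ext_truncate_extend_restart(handle, inode, needed);
 *	if (err == -EAGAIN)
 *		goto again;
 *	else if (err)
 *		goto out;
 *
 * A return of 0 means the handle already has, or was extended to, enough
 * credits; -EAGAIN means the transaction was restarted and the caller
 * must re-read any extent path it was holding.
 */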

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}
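
/*
 * The two helpers above implement the usual JBD2 update cycle that the
 * rest of this file follows (sketch for illustration; concrete callers
 * add their own error handling and locking):
 *
 *	err = ext4_ext_get_access(handle, inode, path + depth);
 *	if (err)
 *		return err;
 *	... modify the extent or index that path[depth] points to ...
 *	err = ext4_ext_dirty(handle, inode, path + depth);
 */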

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return (ext4_ext_pblock(ex) +
				(block - le32_to_cpu(ex->ee_block)));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}
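
/*
 * Example of the goal heuristic above (numbers purely illustrative):
 * with flex_size = 16 and i_block_group = 37, allocation is aimed at
 * group 37 & ~15 = 32, plus one for a regular file, i.e. group 33.
 * Without delayed allocation the per-pid "colour" then spreads writers
 * across sixteen slices of that group's block range.
 */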

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}
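
/*
 * Rough capacities implied by the four helpers above (illustrative,
 * assuming 4KiB blocks and the usual 12-byte header/extent/index
 * records): a leaf or index block holds (4096 - 12) / 12 = 340 entries,
 * while the in-inode root holds (60 - 12) / 12 = 4 entries, since i_data
 * is 60 bytes.  AGGRESSIVE_TEST shrinks these limits so that deep trees
 * can be exercised with small files.
 */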

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
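
/*
 * Worked example (illustrative, 4KiB blocks, so idxs = 340): while a
 * file grows by contiguous delayed-allocation blocks this returns 0 for
 * most calls, one extra metadata block for every 340 data blocks, and a
 * second one around every 340*340 blocks.  A non-contiguous block falls
 * through to the worst case and reserves ext_depth(inode) + 1 blocks.
 */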

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
			    ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
		  if (k != 0 &&
		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
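
/*
 * Example (illustrative): if an index block has entries for logical
 * blocks 0, 100 and 200, a search for block 150 ends with l pointing at
 * the entry for 200, so path->p_idx = l - 1 selects the entry for 100,
 * the last index whose ei_block is <= the requested block.
 */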

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
534 535
 */
static void
536 537
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
538 539 540 541 542 543
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
546 547 548 549
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);
551 552

	l = EXT_FIRST_EXTENT(eh) + 1;
553
	r = EXT_LAST_EXTENT(eh);
554 555 556 557 558 559 560

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
564 565 566
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
568
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
571
			ext4_ext_get_actual_len(path->p_ext));
572 573 574 575 576 577 578 579 580

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
581
					  <= le32_to_cpu(ex[-1].ee_block));
582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
601 602 603 604 605 606
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
607 608
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
609 610 611 612 613 614
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
616 617 618

	/* account possible depth increase */
	if (!path) {
619
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
620 621 622 623 624 625
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;
627

	i = depth;
629 630
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

633 634
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

636
		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
638 639 640
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
643
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
652 653
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
660 661 662 663
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
665 666 667 668 669 670 671 672 673
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
677 678 679 680 681 682 683 684 685 686 687 688 689

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
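
/*
 * Typical lookup built on the helpers above (sketch for illustration
 * only; real callers add locking and error handling):
 *
 *	path = ext4_ext_find_extent(inode, lblk, NULL);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ex = path[ext_depth(inode)].p_ext;
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 *
 * Note that p_ext may be NULL when the leaf is empty, and that an
 * existing path array can be passed back in to be reused by a later
 * lookup.
 */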

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
693
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
697 698 699 700
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
703 704
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}
711 712 713 714 715 716
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
718 719 720 721 722 723 724 725 726 727
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
729 730 731 732 733 734 735 736
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
738
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
739

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d > eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}
	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}
751 752 753 754 755 756 757 758

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
767 768 769 770 771 772 773 774 775 776 777
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
779
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
781 782 783
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */
785

	/* if current leaf will be split, then we should use
787
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
792 793
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
795
				" next leaf starts at %d\n",
796
				  le32_to_cpu(border));
797 798 799 800
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
801
				le32_to_cpu(border));
802 803 804
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
807
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
809 810 811
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
815
	 */
816
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
817 818 819 820 821 822
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
823 824
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err);
825 826 827 828 829 830 831
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
837 838 839 840 841 842 843
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
846 847 848 849
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
851 852 853 854
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
864 865 866 867 868 869
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
871
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext4_ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
874
				ext4_ext_get_actual_len(path[depth].p_ext),
875 876 877 878 879 880 881 882 883
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
884
		le16_add_cpu(&neh->eh_entries, m);
885 886 887 888 889
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
892 893 894 895 896 897
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
900
			goto cleanup;
901
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
904 905 906 907 908 909
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
915 916 917 918 919 920 921 922
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
924 925 926 927 928 929
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
932 933 934 935 936
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
938 939 940
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);
942

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
945 946 947 948 949 950
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
959
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
961
					le32_to_cpu(path[i].p_idx->ei_block),
					ext4_idx_pblock(path[i].p_idx),
963
					newblock);
964 965 966 967 968 969 970 971 972 973
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
974
			le16_add_cpu(&neh->eh_entries, m);
975 976 977 978
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
981 982 983 984 985 986 987 988 989
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
990
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, 0, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
1017 1018 1019 1020 1021 1022 1023 1024
		}
	}
	kfree(ablocks);

	return err;
}
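
/*
 * In outline: ext4_ext_split() allocates depth - at new blocks, turns
 * one of them into a fresh leaf holding every extent to the right of
 * the split point, builds one new index block per intermediate level,
 * and finally ext4_ext_insert_index() links the new subtree into the
 * free slot at depth "at".  On failure the ablocks[] array lets it free
 * everything it had allocated.
 */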

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
1031 1032 1033 1034 1035 1036 1037 1038
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
1040 1041
	int err = 0;

1042
	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate e_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1069
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1071 1072 1073 1074
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
1077 1078 1079
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
1082 1083 1084
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
1086 1087
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
1088 1089 1090 1091 1092 1093 1094

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);
1096 1097

	neh = ext_inode_hdr(inode);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
1099
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1102 1103 1104 1105 1106 1107 1108 1109 1110 1111

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
1135 1136 1137 1138
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);
		if (err)
			goto out;
1141 1142 1143 1144

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
1145 1146
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
1158 1159
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
1160 1161 1162 1163 1164 1165
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
1168 1169 1170
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
1172 1173 1174 1175 1176 1177 1178 1179
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? ix->ei_block : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
				    EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1320
			goto got_index;
	}

1323 1324
	/* we've gone up to the root and found no index to the right */
	return 0;

1326
got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	put_bh(bh);
	return 0;
}

1362
/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
1368
 */
1369
static ext4_lblk_t
1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
1400 1401
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
1402
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
1403
					struct ext4_ext_path *path)
1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
A
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
1422 1423 1424 1425 1426 1427 1428
		depth--;
	}

	return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
A
 * TODO: do we need to correct tree in all cases?
 */
A
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
A
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}
1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
1464 1465 1466
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
1469 1470
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
1473 1474 1475 1476 1477 1478
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
1481 1482
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
1485 1486 1487 1488 1489 1490
			break;
	}

	return err;
}

int
1492 1493 1494
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;
1496 1497 1498 1499 1500 1501 1502 1503

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

1509 1510 1511 1512
	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1513
			le32_to_cpu(ex2->ee_block))
1514 1515
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
1525 1526 1527
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1529 1530 1531 1532
		return 1;
	return 0;
}
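
/*
 * Example (illustrative numbers): an initialized extent covering logical
 * blocks 100..107 at physical block 500 can be merged with one covering
 * 108..115 at physical block 508; both are initialized, the logical and
 * physical ranges are contiguous, and the combined length of 16 stays
 * well below EXT_INIT_MAX_LEN (ignoring the AGGRESSIVE_TEST limit).
 */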

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
1569
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

1579 1580 1581 1582 1583 1584 1585 1586
/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
1590
{
1591
	ext4_lblk_t b1, b2;
1592 1593 1594 1595
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
1596
	len1 = ext4_ext_get_actual_len(newext);
1597 1598 1599 1600 1601 1602 1603
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
1605 1606 1607 1608 1609 1610 1611
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

1612
	/* check for wrap through zero on extent logical start block*/
1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
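
/*
 * Example (illustrative): if newext asks for logical blocks 100..149 but
 * an allocated extent already starts at block 120, the check above trims
 * newext->ee_len to 20 (blocks 100..119) and returns 1, so the caller
 * only maps the non-overlapping prefix.
 */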

1628
/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
A
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
1636
				struct ext4_extent *newext, int flag)
A
{
	struct ext4_extent_header *eh;
1639 1640 1641
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
1642 1643
	int depth, len, err;
	ext4_lblk_t next;
1644
	unsigned uninitialized = 0;
1645

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
1650 1651
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}
1656 1657

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
1669
			return err;
1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isnt full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
1718 1719 1720 1721 1722 1723 1724 1725 1726 1727
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
1730 1731 1732 1733
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
1735
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
1738
				ext4_ext_get_actual_len(newext));
1739 1740
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
1741
			   > le32_to_cpu(nearex->ee_block)) {
1742 1743 1744 1745 1746
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
1748
					"move %d from 0x%p to 0x%p\n",
1749
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
1752
					ext4_ext_get_actual_len(newext),
1753 1754 1755 1756 1757 1758 1759 1760
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
1762 1763
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
1766
				ext4_ext_get_actual_len(newext),
A
Alex Tomas 已提交
1767 1768 1769 1770 1771
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}

static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			       ext4_lblk_t num, ext_prepare_callback func,
			       void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EIO;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext4_ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
			err = -EIO;
			break;
		}
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
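/*
 * Note for readers (illustrative, not from the original sources): the walk
 * above reports each region of [block, block + num) to the callback either
 * as a real extent (EXT4_EXT_CACHE_EXTENT in cbex.ec_type) or as a gap
 * (EXT4_EXT_CACHE_GAP).  The callback's return value steers the walk:
 * a negative value aborts with that error, EXT_BREAK stops cleanly,
 * EXT_REPEAT revisits the same region, and any other non-negative value
 * moves on to the next region.  The fiemap path is a typical user.
 */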

static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start, int type)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				 ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}
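/*
 * Illustration (not part of the original comments): if a file has extents
 * over logical blocks [0, 10) and [50, 60), a lookup of block 20 lands in
 * the "gap after" branch above: lblock = 10, next = 50, so the gap
 * 10..49 is cached and later lookups in that range can be answered as a
 * hole without another tree search.
 */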

static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;
	int ret = EXT4_EXT_CACHE_NO;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		goto errout;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (in_range(block, cex->ec_block, cex->ec_len)) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = cex->ec_type;
	}
errout:
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}
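/*
 * For illustration: a hit in this single-entry cache returns either
 * EXT4_EXT_CACHE_EXTENT (ex filled with the cached mapping) or
 * EXT4_EXT_CACHE_GAP (the block is a known hole); EXT4_EXT_CACHE_NO means
 * the caller must walk the on-disk tree, which is exactly how
 * ext4_ext_map_blocks() below uses it.
 */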

/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * the last index in the block only.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EIO;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	ext4_free_blocks(handle, inode, 0, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return err;
}

/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the max. credits needed to insert an extent
 * to the extent tree.
 * When passing the actual path, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 *  There are some space in the leaf tree, no
			 *  need to account for leaf block credit
			 *
			 *  bitmaps and block group descriptor blocks
			 *  and other metadata blocks still need to be
			 *  accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}

/*
 * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
 *
 * If nrblocks fit in a single extent (chunk flag is 1), then in the worst
 * case each tree level's index/leaf needs to be changed if the tree splits
 * due to inserting a new extent; the old tree's index/leaf need to be
 * updated too.
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int index;
	int depth = ext_depth(inode);

	if (chunk)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}
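/*
 * Worked example (illustrative): for a tree of depth 2, nrblocks that fit
 * in one extent (chunk == 1) account for 2 * 2 = 4 index/leaf blocks,
 * while discontiguous blocks (chunk == 0) account for 2 * 3 = 6.  These
 * are journal-credit estimates, not exact block counts.
 */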

static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				ext4_lblk_t from, ext4_lblk_t to)
{
	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
	int flags = EXT4_FREE_BLOCKS_FORGET;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;
#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext4_ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, 0, start, num, flags);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	} else {
		printk(KERN_INFO "strange request: removal(2) "
				"%u-%u from %u:%u\n",
				from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}
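/*
 * Example (illustrative numbers): for an extent starting at logical block
 * 200 with ee_len == 10, a call with from == 205 and to == 209 takes the
 * "tail removal" branch above: num = 200 + 10 - 205 = 5, so the last five
 * physical blocks of the extent are freed.  Removing the head or the
 * middle of an extent is not implemented here, which is why the other
 * branches only log the request.
 */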

static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b, block;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;

2166
	/* the header must be checked already in ext4_ext_remove_space() */
A
A
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
2171 2172 2173 2174
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}
	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		else
			uninitialized = 0;

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			 uninitialized, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			le16_add_cpu(&eh->eh_entries, -1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}

/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}

static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i, err;

	ext_debug("truncate since %u\n", start);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

again:
	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_depth = depth;
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	i = err = 0;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check(inode, ext_block_hdr(bh),
							depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by the
				 * leaf removal code (ext4_ext_rm_leaf()) */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (err == -EAGAIN)
		goto again;
	ext4_journal_stop(handle);

	return err;
}
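/*
 * Summary for readers (illustrative): the loop above is an iterative
 * depth-first walk that uses path[] as an explicit stack.  Starting from
 * the root index (i == 0) it descends via sb_bread() towards the rightmost
 * unvisited child until it reaches a leaf (i == depth), lets
 * ext4_ext_rm_leaf() free what it can, and then backs up, removing any
 * index block that became empty on the way.
 */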

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

/* FIXME!! we need to try to merge to left or right after zero-out  */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;
	int ret;

	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define EXT4_EXT_ZERO_LEN 7
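
/*
 * EXT4_EXT_ZERO_LEN is a heuristic threshold (in blocks): when the piece
 * of an uninitialized extent that a write leaves behind is this small (or
 * the whole extent is at most twice this), the code below zeroes the
 * blocks and marks the extent initialized instead of splitting it,
 * trading a little extra I/O for less extent-tree fragmentation.  The
 * value 7 is tuning, not a correctness requirement.
 */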
/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path *path)
{
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
	ext4_lblk_t ee_block, eof_block;
	unsigned int allocated, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;
	int ret = 0;
	int may_zeroout;

	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (map->m_lblk - ee_block);
	newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);

	ex2 = ex;
	orig_ex.ee_block = ex->ee_block;
	orig_ex.ee_len   = cpu_to_le16(ee_len);
	ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex));

	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	may_zeroout = ee_block + ee_len <= eof_block;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* If extent has less than 2*EXT4_EXT_ZERO_LEN, zero out directly */
	if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) {
		err =  ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zeroed the full extent */
		return allocated;
	}

	/* ex1: ee_block to map->m_lblk - 1 : uninitialized */
	if (map->m_lblk > ee_block) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
	if (!ex1 && allocated > map->m_len)
		ex2->ee_len = cpu_to_le16(map->m_len);
	/* ex3: to ee_block + ee_len : uninitialised */
	if (allocated > map->m_len) {
		unsigned int newdepth;
		/* If extent has less than EXT4_EXT_ZERO_LEN, zero out directly */
		if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
			/*
			 * map->m_lblk == ee_block is handled by the zeroout
			 * at the beginning.
			 * Mark first half uninitialized.
			 * Mark second half initialized and zero out the
			 * initialized extent
			 */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = cpu_to_le16(ee_len - allocated);
			ext4_ext_mark_uninitialized(ex);
			ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);

			ex3 = &newex;
			ex3->ee_block = cpu_to_le32(map->m_lblk);
			ext4_ext_store_pblock(ex3, newblock);
			ex3->ee_len = cpu_to_le16(allocated);
			err = ext4_ext_insert_extent(handle, inode, path,
							ex3, 0);
			if (err == -ENOSPC) {
				err =  ext4_ext_zeroout(inode, &orig_ex);
				if (err)
					goto fix_extent_len;
				ex->ee_block = orig_ex.ee_block;
				ex->ee_len   = orig_ex.ee_len;
				ext4_ext_store_pblock(ex,
					ext4_ext_pblock(&orig_ex));
				ext4_ext_dirty(handle, inode, path + depth);
				/* blocks available from map->m_lblk */
				return allocated;

			} else if (err)
				goto fix_extent_len;

			/*
			 * We need to zero out the second half because
			 * a fallocate request can update file size and
			 * converting the second half to an initialized extent
			 * implies that we can leak some junk data to user
			 * space.
			 */
			err =  ext4_ext_zeroout(inode, ex3);
			if (err) {
				/*
				 * We should actually mark the
				 * second half as uninit and return an error;
				 * the insert would have changed the extent.
				 */
				depth = ext_depth(inode);
				ext4_ext_drop_refs(path);
				path = ext4_ext_find_extent(inode, map->m_lblk,
							    path);
				if (IS_ERR(path)) {
					err = PTR_ERR(path);
					return err;
				}
				/* get the second half extent details */
				ex = path[depth].p_ext;
				err = ext4_ext_get_access(handle, inode,
								path + depth);
				if (err)
					return err;
				ext4_ext_mark_uninitialized(ex);
				ext4_ext_dirty(handle, inode, path + depth);
				return err;
			}

			/* zeroed the second half */
			return allocated;
		}
		ex3 = &newex;
		ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
		ext4_ext_store_pblock(ex3, newblock + map->m_len);
		ex3->ee_len = cpu_to_le16(allocated - map->m_len);
		ext4_ext_mark_uninitialized(ex3);
		err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
		if (err == -ENOSPC && may_zeroout) {
			err =  ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zeroed the full extent */
			/* blocks available from map->m_lblk */
			return allocated;

		} else if (err)
			goto fix_extent_len;
		/*
		 * The depth, and hence eh & ex might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
		/*
		 * update the extent length after successful insert of the
		 * split extent
		 */
		ee_len -= ext4_ext_get_actual_len(ex3);
		orig_ex.ee_len = cpu_to_le16(ee_len);
		may_zeroout = ee_block + ee_len <= eof_block;

		depth = newdepth;
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode, map->m_lblk, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}
		eh = path[depth].p_hdr;
		ex = path[depth].p_ext;
		if (ex2 != &newex)
			ex2 = ex;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		allocated = map->m_len;

		/* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
		 * to insert an extent in the middle, zero out directly,
		 * otherwise give the extent a chance to merge to the left
		 */
		if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
			map->m_lblk != ee_block && may_zeroout) {
			err =  ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zero out the first half */
			/* blocks available from map->m_lblk */
			return allocated;
		}
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
	ex2->ee_block = cpu_to_le32(map->m_lblk);
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	if (ex2 != ex)
		goto insert;
	/*
	 * New (initialized) extent starts from the first block
	 * in the current extent. i.e., ex2 == ex
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex2 > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex2 - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex2--;
		}
	}
	/*
	 * Try to Merge towards right. This might be required
	 * only when the whole extent is being written to.
	 * i.e. ex2 == ex and ex3 == NULL.
	 */
	if (!ex3) {
		ret = ext4_ext_try_to_merge(inode, path, ex2);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
		}
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
	if (err == -ENOSPC && may_zeroout) {
		err =  ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zero out the first half */
		return allocated;
	} else if (err)
		goto fix_extent_len;
out:
	ext4_ext_show_leaf(inode, path);
	return err ? err : allocated;

fix_extent_len:
	ex->ee_block = orig_ex.ee_block;
	ex->ee_len   = orig_ex.ee_len;
	ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
	ext4_ext_mark_uninitialized(ex);
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}

/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO is used to write
 * to an uninitialized extent.
 *
 * Writing to an uninitialized extent may result in splitting the uninitialized
 * extent into multiple initialized/uninitialized extents (up to three).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be uninitialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the uninitialized extent split. To prevent ENOSPC from occurring when the
 * IO completes, we need to split the uninitialized extent before DIO submits
 * the IO. The uninitialized extent passed in at this time will be split
 * into three uninitialized extents (at most). After IO completes, the part
 * being filled will be converted to initialized by the end_io callback function
 * via ext4_convert_unwritten_extents().
 *
 * Returns the size of the uninitialized extent to be written on success.
 */
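/*
 * Illustration (hypothetical numbers): with an uninitialized extent
 * [100, 120) and a DIO write to [108, 112), the split below produces the
 * same three pieces as in the buffered case, but all of them stay
 * uninitialized; only when the I/O completes does the end_io path convert
 * the written piece [108, 112) via ext4_convert_unwritten_extents().
 */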
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_map_blocks *map,
					struct ext4_ext_path *path,
					int flags)
{
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	ext4_lblk_t ee_block, eof_block;
	unsigned int allocated, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;
	int may_zeroout;

	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (map->m_lblk - ee_block);
	newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);

	ex2 = ex;
	orig_ex.ee_block = ex->ee_block;
	orig_ex.ee_len   = cpu_to_le16(ee_len);
	ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex));

	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully inside i_size or new_size.
	 */
	may_zeroout = ee_block + ee_len <= eof_block;

	/*
 	 * If the uninitialized extent begins at the same logical
 	 * block where the write begins, and the write completely
 	 * covers the extent, then we don't need to split it.
 	 */
	if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
		return allocated;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* ex1: ee_block to map->m_lblk - 1 : uninitialized */
	if (map->m_lblk > ee_block) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
	if (!ex1 && allocated > map->m_len)
		ex2->ee_len = cpu_to_le16(map->m_len);
	/* ex3: to ee_block + ee_len : uninitialised */
	if (allocated > map->m_len) {
		unsigned int newdepth;
		ex3 = &newex;
		ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
		ext4_ext_store_pblock(ex3, newblock + map->m_len);
		ex3->ee_len = cpu_to_le16(allocated - map->m_len);
		ext4_ext_mark_uninitialized(ex3);
		err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
		if (err == -ENOSPC && may_zeroout) {
			err =  ext4_ext_zeroout(inode, &orig_ex);
			if (err)
				goto fix_extent_len;
			/* update the extent length and mark as initialized */
			ex->ee_block = orig_ex.ee_block;
			ex->ee_len   = orig_ex.ee_len;
			ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
			ext4_ext_dirty(handle, inode, path + depth);
			/* zeroed the full extent */
			/* blocks available from map->m_lblk */
			return allocated;

		} else if (err)
			goto fix_extent_len;
		/*
		 * The depth, and hence eh & ex might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
		/*
		 * update the extent length after successful insert of the
		 * split extent
		 */
		ee_len -= ext4_ext_get_actual_len(ex3);
		orig_ex.ee_len = cpu_to_le16(ee_len);
		may_zeroout = ee_block + ee_len <= eof_block;

		depth = newdepth;
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode, map->m_lblk, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}
		ex = path[depth].p_ext;
		if (ex2 != &newex)
			ex2 = ex;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		allocated = map->m_len;
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
	 * using direct I/O, uninitialised still.
	 */
	ex2->ee_block = cpu_to_le32(map->m_lblk);
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	ext4_ext_mark_uninitialized(ex2);
	if (ex2 != ex)
		goto insert;
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	ext_debug("out here\n");
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2993
	if (err == -ENOSPC && may_zeroout) {
2994 2995 2996 2997 2998 2999
		err =  ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_block = orig_ex.ee_block;
		ex->ee_len   = orig_ex.ee_len;
		ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
		ext4_ext_dirty(handle, inode, path + depth);
		/* zero out the first half */
		return allocated;
	} else if (err)
		goto fix_extent_len;
out:
	ext4_ext_show_leaf(inode, path);
	return err ? err : allocated;

fix_extent_len:
	ex->ee_block = orig_ex.ee_block;
	ex->ee_len   = orig_ex.ee_len;
	ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
	ext4_ext_mark_uninitialized(ex);
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
					      struct inode *inode,
					      struct ext4_ext_path *path)
{
	struct ext4_extent *ex;
	struct ext4_extent_header *eh;
	int depth;
	int err = 0;
	int ret = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/*
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex--;
		}
	}
	/*
	 * Try to Merge towards right.
	 */
	ret = ext4_ext_try_to_merge(inode, path, ex);
	if (ret) {
		err = ext4_ext_correct_indexes(handle, inode, path);
		if (err)
			goto out;
		depth = ext_depth(inode);
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}

static void unmap_underlying_metadata_blocks(struct block_device *bdev,
			sector_t block, int count)
{
	int i;
	for (i = 0; i < count; i++)
                unmap_underlying_metadata(bdev, block + i);
}

/*
 * Handle EOFBLOCKS_FL flag, clearing it if necessary
 */
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
			      struct ext4_map_blocks *map,
			      struct ext4_ext_path *path,
			      unsigned int len)
{
	int i, depth;
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *last_ex;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
		return 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(!eh->eh_entries)) {
		EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
				 "EOFBLOCKS_FL set");
		return -EIO;
	}
	last_ex = EXT_LAST_EXTENT(eh);
	/*
	 * We should clear the EOFBLOCKS_FL flag if we are writing the
	 * last block in the last extent in the file.  We test this by
	 * first checking to see if the caller to
	 * ext4_ext_get_blocks() was interested in the last block (or
	 * a block beyond the last block) in the current extent.  If
	 * this turns out to be false, we can bail out from this
	 * function immediately.
	 */
	if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
	    ext4_ext_get_actual_len(last_ex))
		return 0;
	/*
	 * If the caller does appear to be planning to write at or
	 * beyond the end of the current extent, we then test to see
	 * if the current extent is the last extent in the file, by
	 * checking to make sure it was reached via the rightmost node
	 * at each level of the tree.
	 */
	for (i = depth-1; i >= 0; i--)
		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
			return 0;
	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	return ext4_mark_inode_dirty(handle, inode);
}

static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			struct ext4_ext_path *path, int flags,
			unsigned int allocated, ext4_fsblk_t newblock)
{
	int ret = 0;
	int err = 0;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
		  "block %llu, max_blocks %u, flags %d, allocated %u",
		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

	/* get_block() called before submitting the IO, split the extent */
	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
		ret = ext4_split_unwritten_extents(handle, inode, map,
						   path, flags);
		/*
		 * Flag the inode (non-aio case) or end_io struct (aio case)
		 * that this IO needs conversion to written when the IO is
		 * completed
		 */
		if (io)
			io->flag = EXT4_IO_END_UNWRITTEN;
		else
			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		if (ext4_should_dioread_nolock(inode))
			map->m_flags |= EXT4_MAP_UNINIT;
		goto out;
	}
	/* IO end_io complete, convert the filled extent to written */
	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
		ret = ext4_convert_unwritten_extents_endio(handle, inode,
							path);
		if (ret >= 0) {
			ext4_update_inode_fsync_trans(handle, inode, 1);
			err = check_eofblocks_fl(handle, inode, map, path,
						 map->m_len);
		} else
			err = ret;
		goto out2;
	}
	/* buffered IO case */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
		goto map_out;

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already.  We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us.  But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto out1;
	}

	/* buffered write, writepage time, convert */
	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
	if (ret >= 0) {
		ext4_update_inode_fsync_trans(handle, inode, 1);
		err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
		if (err < 0)
			goto out2;
	}

out:
	if (ret <= 0) {
		err = ret;
		goto out2;
	} else
		allocated = ret;
	map->m_flags |= EXT4_MAP_NEW;
	/*
	 * if we allocated more blocks than requested
	 * we need to make sure we unmap the extra block
	 * allocated. The actual needed block will get
	 * unmapped later when we find the buffer_head marked
	 * new.
	 */
	if (allocated > map->m_len) {
		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
					newblock + map->m_len,
					allocated - map->m_len);
		allocated = map->m_len;
	}

	/*
	 * If we have done fallocate with the offset that is already
	 * delayed allocated, we would have block reservation
	 * and quota reservation done in the delayed write path.
	 * But fallocate would have already updated quota and block
	 * count for this offset. So cancel these reservations.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_da_update_reserve_space(inode, allocated, 0);

map_out:
	map->m_flags |= EXT4_MAP_MAPPED;
out1:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}
/*
 * Block allocation/map/preallocation routine for extents based files
 *
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 *
 * return > 0, number of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *          	buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain lookup failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
 */
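/*
 * Calling sketch (illustrative, simplified; lblk/len are placeholders):
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *	ret = ext4_ext_map_blocks(handle, inode, &map,
 *				  EXT4_GET_BLOCKS_CREATE);
 *
 * On success ret is the number of blocks mapped and map.m_pblk, map.m_len
 * and map.m_flags describe the mapping actually obtained; with flags == 0
 * no allocation is done and a hole maps to a return value of 0.
 */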
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map, int flags)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent newex, *ex;
	ext4_fsblk_t newblock;
	int err = 0, depth, ret, cache_type;
	unsigned int allocated = 0;
	struct ext4_allocation_request ar;
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;

	ext_debug("blocks %u/%u requested for inode %lu\n",
		  map->m_lblk, map->m_len, inode->i_ino);

	/* check in cache */
	cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
	if (cache_type) {
		if (cache_type == EXT4_EXT_CACHE_GAP) {
			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
				/*
				 * block isn't allocated yet and
				 * user doesn't want to allocate it
				 */
				goto out2;
			}
			/* we should allocate requested block */
		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
			/* block is already allocated */
			newblock = map->m_lblk
				   - le32_to_cpu(newex.ee_block)
				   + ext4_ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = ext4_ext_get_actual_len(&newex) -
				(map->m_lblk - le32_to_cpu(newex.ee_block));
			goto out;
		} else {
			BUG();
		}
	}

	/* find extent for this block */
	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode, "bad extent address "
				 "lblock: %lu, depth: %d pblock %lld",
				 (unsigned long) map->m_lblk, depth,
				 path[depth].p_block);
		err = -EIO;
		goto out2;
	}
	eh = path[depth].p_hdr;

	ex = path[depth].p_ext;
	if (ex) {
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
		unsigned short ee_len;

		/*
		 * Uninitialized extents are treated as holes, except that
		 * we split out initialized portions during a write.
		 */
		ee_len = ext4_ext_get_actual_len(ex);
		/* if found extent covers block, simply return it */
		if (in_range(map->m_lblk, ee_block, ee_len)) {
			newblock = map->m_lblk - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (map->m_lblk - ee_block);
			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
				  ee_block, ee_len, newblock);

			/* Do not put uninitialized extent in the cache */
			if (!ext4_ext_is_uninitialized(ex)) {
				ext4_ext_put_in_cache(inode, ee_block,
							ee_len, ee_start,
							EXT4_EXT_CACHE_EXTENT);
				goto out;
			}
			ret = ext4_ext_handle_uninitialized_extents(handle,
					inode, map, path, flags, allocated,
					newblock);
			return ret;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we don't try to create a block if the create flag is zero
	 */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
		goto out2;
	}
	/*
	 * Okay, we need to do block allocation.
	 */

	/* find neighbour allocated blocks */
	ar.lleft = map->m_lblk;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out2;
	ar.lright = map->m_lblk;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
	if (err)
		goto out2;

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
	 * EXT_UNINIT_MAX_LEN.
	 */
	if (map->m_len > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		map->m_len = EXT_INIT_MAX_LEN;
	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		map->m_len = EXT_UNINIT_MAX_LEN;

	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
	newex.ee_block = cpu_to_le32(map->m_lblk);
	newex.ee_len = cpu_to_le16(map->m_len);
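	/*
	 * ext4_ext_check_overlap() trims newex.ee_len if the proposed range
	 * overlaps an already-mapped extent, so on overlap we only allocate
	 * the non-overlapping prefix of the request.
	 */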
	err = ext4_ext_check_overlap(inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = map->m_len;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
	ar.logical = map->m_lblk;
	ar.len = allocated;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
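	/* the allocator may return fewer blocks than requested; ar.len
	 * now holds the number of blocks actually allocated */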
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);

	/* try to insert new extent into found leaf and return */
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark uninitialized */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
		ext4_ext_mark_uninitialized(&newex);
		/*
		 * An io_end structure is created for every IO write to an
		 * uninitialized extent. To avoid unnecessary conversion,
		 * here we flag the IO that really needs the conversion.
		 * For the non-async direct IO case, flag the inode state
		 * that we need to perform the conversion when IO is done.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
			if (io)
				io->flag = EXT4_IO_END_UNWRITTEN;
			else
				ext4_set_inode_state(inode,
						     EXT4_STATE_DIO_UNWRITTEN);
		}
		if (ext4_should_dioread_nolock(inode))
			map->m_flags |= EXT4_MAP_UNINIT;
	}
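	/*
	 * If this allocation reaches the last block that a previous
	 * fallocate() reserved beyond i_size, check_eofblocks_fl() below
	 * clears the EXT4_INODE_EOFBLOCKS flag.
	 */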

	err = check_eofblocks_fl(handle, inode, map, path, ar.len);
	if (err)
		goto out2;

	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err) {
		/* free data blocks we just allocated */
		/* not a good idea to call discard here directly,
		 * but otherwise we'd need to call it every free() */
		ext4_discard_preallocations(inode);
		ext4_free_blocks(handle, inode, 0, ext4_ext_pblock(&newex),
				 ext4_ext_get_actual_len(&newex), 0);
		goto out2;
	}

	/* previous routine could use block we allocated */
	newblock = ext4_ext_pblock(&newex);
	allocated = ext4_ext_get_actual_len(&newex);
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_flags |= EXT4_MAP_NEW;

	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_da_update_reserve_space(inode, allocated, 1);

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an uninitialized extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
						EXT4_EXT_CACHE_EXTENT);
		ext4_update_inode_fsync_trans(handle, inode, 1);
	} else
		ext4_update_inode_fsync_trans(handle, inode, 0);
out:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}

void ext4_ext_truncate(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * probably the first extent we are going to free will be the last
	 * one in the block
	 */
	err = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle))
		return;

	if (inode->i_size & (sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
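	/* the extent tree and the extent cache are modified under i_data_sem */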
	ext4_ext_invalidate_cache(inode);

	ext4_discard_preallocations(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous.
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

out_stop:
	up_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}

static void ext4_falloc_update_inode(struct inode *inode,
				int mode, loff_t new_size, int update_ctime)
{
	struct timespec now;

	if (update_ctime) {
		now = current_fs_time(inode->i_sb);
		if (!timespec_equal(&inode->i_ctime, &now))
			inode->i_ctime = now;
	}
	/*
	 * Update only when preallocation was requested beyond
	 * the file size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (new_size > EXT4_I(inode)->i_disksize)
			ext4_update_i_disksize(inode, new_size);
	} else {
		/*
		 * Mark that we allocate beyond EOF so the subsequent truncate
		 * can proceed even if the new size is the same as i_size.
		 */
		if (new_size > i_size_read(inode))
			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	}

}

/*
 * preallocate space for a file. This implements ext4's fallocate inode
 * operation, which gets called from the sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems that do not support the fallocate() system call).
 */
long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
{
	handle_t *handle;
	loff_t new_size;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	/*
	 * currently supporting (pre)allocate mode for extent-based
	 * files _only_
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EOPNOTSUPP;

	/* preallocation to directories is currently not supported */
	if (S_ISDIR(inode->i_mode))
		return -ENODEV;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because
	 * if blocksize = 4096, offset = 3072 and len = 2048 the range
	 * still spans two blocks.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
		- map.m_lblk;
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, (len + offset));
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		return ret;
	}
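	/*
	 * Allocate the range in chunks: each pass maps as many blocks as
	 * ext4_map_blocks() manages in one call, under its own transaction,
	 * until max_blocks have been preallocated.
	 */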
retry:
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk = map.m_lblk + ret;
		map.m_len = max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
		if (ret <= 0) {
#ifdef EXT4FS_DEBUG
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_map_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, map.m_lblk, max_blocks);
#endif
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
						blkbits) >> blkbits))
			new_size = offset + len;
		else
			new_size = (map.m_lblk + ret) << blkbits;

		ext4_falloc_update_inode(inode, mode, new_size,
					 (map.m_flags & EXT4_MAP_NEW));
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
			ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}
	mutex_unlock(&inode->i_mutex);
	return ret > 0 ? ret2 : ret;
}

/*
 * This function converts a range of blocks to written extents.
 * The caller passes the start offset and the size; all unwritten
 * extents within this range will be converted to written extents.
 *
 * This function is called from the direct IO end_io callback
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
				    ssize_t len)
{
	handle_t *handle;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because
	 * if blocksize = 4096, offset = 3072 and len = 2048 the range
	 * still spans two blocks.
	 */
	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
		      map.m_lblk);
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
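	/*
	 * Walk the range chunk by chunk: each iteration converts as many
	 * unwritten blocks as ext4_map_blocks() handles in one call, under
	 * its own transaction.
	 */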
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0) {
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_map_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, map.m_lblk, map.m_len);
		}
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	return ret > 0 ? ret2 : ret;
}
/*
 * Callback function called for each extent to gather FIEMAP information.
 */
static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
		       void *data)
{
	struct fiemap_extent_info *fieinfo = data;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
	__u64	logical;
	__u64	physical;
	__u64	length;
	__u32	flags = 0;
	int	error;

	logical =  (__u64)newex->ec_block << blksize_bits;

	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
		pgoff_t offset;
		struct page *page;
		struct buffer_head *bh = NULL;

		offset = logical >> PAGE_SHIFT;
		page = find_get_page(inode->i_mapping, offset);
		if (!page || !page_has_buffers(page))
			return EXT_CONTINUE;

		bh = page_buffers(page);

		if (!bh)
			return EXT_CONTINUE;

		if (buffer_delay(bh)) {
			flags |= FIEMAP_EXTENT_DELALLOC;
			page_cache_release(page);
		} else {
			page_cache_release(page);
			return EXT_CONTINUE;
		}
	}

	physical = (__u64)newex->ec_start << blksize_bits;
	length =   (__u64)newex->ec_len << blksize_bits;

	if (ex && ext4_ext_is_uninitialized(ex))
		flags |= FIEMAP_EXTENT_UNWRITTEN;

	/*
	 * If this extent reaches EXT_MAX_BLOCK, it must be last.
	 *
	 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
	 * this also indicates no more allocated blocks.
	 *
	 * XXX this might miss a single-block extent at EXT_MAX_BLOCK
	 */
3808
	if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
3809 3810 3811 3812
	    newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
		loff_t size = i_size_read(inode);
		loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);

3813
		flags |= FIEMAP_EXTENT_LAST;
3814 3815 3816 3817
		if ((flags & FIEMAP_EXTENT_DELALLOC) &&
		    logical+length > size)
			length = (size - logical + bs - 1) & ~(bs-1);
	}
3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831

	error = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, flags);
	if (error < 0)
		return error;
	if (error == 1)
		return EXT_BREAK;

	return EXT_CONTINUE;
}

/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}

int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	/* fallback to generic here if not in extents fmt */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
3884 3885 3886
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
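		/* clamp the last block so the walk never goes past the
		 * highest logical block an extent tree can address */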
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCK)
			last_blk = EXT_MAX_BLOCK-1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information.
		 * ext4_ext_fiemap_cb will push extents back to user.
		 */
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					  ext4_ext_fiemap_cb, fieinfo);
	}

	return error;
}