/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>

static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object's sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
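		/*
		 * (Clarifying note, not in the original source: the goal is
		 * simply the physical block of the nearest extent, shifted by
		 * the logical distance to it, so new allocations land close
		 * to already-mapped data on disk.)
		 */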
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}
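/*
 * (Illustrative note: AGGRESSIVE_TEST artificially caps the entry counts
 * returned by the ext4_ext_space_*() helpers so that index splits and
 * in-depth tree growth are exercised even on tiny test files; it is a
 * debug-only option.)
 */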

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
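	/*
	 * Illustrative arithmetic (assuming a 4 KiB block size): the extent
	 * header and an index entry are both 12 bytes, so
	 * idxs = (4096 - 12) / 12 = 340, i.e. an extra index block is
	 * charged only once every 340 contiguous delayed-allocation blocks.
	 */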
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
			    ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
					le32_to_cpu(idx->ei_block),
					ext4_idx_pblock(idx),
					newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
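/*
 * (Clarifying note: "closest" means the last index entry whose ei_block
 * is <= the requested block, i.e. the subtree that can contain it.)
 */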
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
		  if (k != 0 &&
		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}

	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing
	 * and mark the filesystem read-only. The index won't
	 * be inserted and the tree will be left in a consistent
	 * state. The next mount will repair the buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_ext_path *path,
				 struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate e_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags,
					    path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only the first grow (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? ix->ei_block : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
				    EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext4_ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;
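	/*
	 * (Illustrative note: EXT_INIT_MAX_LEN is 2^15 blocks; uninitialized
	 * extents reserve the top bit of ee_len as a marker, so their limit
	 * is 2^15 - 1.)
	 */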

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards the right. If you want to merge towards
 * the left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function tries to merge the @ex extent to neighbours in the tree.
 * return 1 if merge left else 0.
 */
static int ext4_ext_try_to_merge(struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex) {
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;
	int ret = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		ret = ext4_ext_try_to_merge_right(inode, path, ex);

	return ret;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
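/*
 * Worked example (illustrative): if the new extent would map logical
 * blocks 100..119 but an existing extent already starts at block 110,
 * newext->ee_len is trimmed to 10 so that it covers only 100..109.
 */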
static unsigned int ext4_ext_check_overlap(struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}

/*
 * ext4_ext_insert_extent:
 * tries to merge the requested extent into the existing extent or
 * inserts the requested extent as a new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;
	int flags = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're going to add a new leaf to the tree.
	 */
	if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
		flags = EXT4_MB_USE_ROOT_BLOCKS;
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;
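	/*
	 * (Clarifying note: three cases follow -- the leaf is empty, the new
	 * extent goes after the nearest extent, or it goes before it; in the
	 * latter two cases the tail of the leaf is shifted with memmove() to
	 * open a slot for the new entry.)
	 */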

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
A
Alex Tomas 已提交
1781 1782
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
D
Dave Kleikamp 已提交
1783
			   > le32_to_cpu(nearex->ee_block)) {
A
Alex Tomas 已提交
1784 1785 1786 1787 1788
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
1789
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
A
Alex Tomas 已提交
1790
					"move %d from 0x%p to 0x%p\n",
D
Dave Kleikamp 已提交
1791
					le32_to_cpu(newext->ee_block),
1792
					ext4_ext_pblock(newext),
1793
					ext4_ext_is_uninitialized(newext),
A
Amit Arora 已提交
1794
					ext4_ext_get_actual_len(newext),
A
Alex Tomas 已提交
1795 1796 1797 1798 1799 1800 1801 1802
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
1803
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
A
Alex Tomas 已提交
1804 1805
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
1806
				ext4_ext_pblock(newext),
1807
				ext4_ext_is_uninitialized(newext),
A
Amit Arora 已提交
1808
				ext4_ext_get_actual_len(newext),
1809
				nearex, len, nearex, nearex + 1);
A
Alex Tomas 已提交
1810 1811 1812 1813
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

M
Marcin Slusarz 已提交
1814
	le16_add_cpu(&eh->eh_entries, 1);
A
Alex Tomas 已提交
1815 1816
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
1817
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
A
Alex Tomas 已提交
1818 1819 1820 1821
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
1822
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1823
		ext4_ext_try_to_merge(inode, path, nearex);
A
Alex Tomas 已提交
1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}

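/*
 * ext4_ext_walk_space:
 * Walk the extent tree over the logical range [block, block + num) and
 * invoke the supplied callback once per extent found and once per hole
 * (reported with ec_start == 0).  The callback may return EXT_REPEAT to
 * re-examine the same range, EXT_BREAK to stop the walk early, or a
 * negative error code to abort; the path is re-read whenever the tree
 * depth changes underneath the walk.
 */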
static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			       ext4_lblk_t num, ext_prepare_callback func,
			       void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCKS) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EIO;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext4_ext_pblock(ex);
		}

		if (unlikely(cbex.ec_len == 0)) {
			EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
			err = -EIO;
			break;
		}
		err = func(inode, next, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}

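/*
 * ext4_ext_put_in_cache:
 * Remember the given logical range in the single-slot per-inode extent
 * cache (i_cached_extent) so that a subsequent lookup near the same
 * block can be answered without another tree walk.  A start of 0 is
 * used by ext4_ext_put_gap_in_cache() to record a hole.
 */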
static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCKS;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0);
}

/*
 * ext4_ext_check_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 * cache extent pointer.  If the cached extent is a hole,
 * this routine should be used instead of
 * ext4_ext_in_cache if the calling function needs to
 * know the size of the hole.
 *
 * @inode: The file's inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_ext_cache *ex)
{
	struct ext4_ext_cache *cex;
	struct ext4_sb_info *sbi;
	int ret = 0;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	sbi = EXT4_SB(inode->i_sb);

	/* has cache valid data? */
	if (cex->ec_len == 0)
		goto errout;

	if (in_range(block, cex->ec_block, cex->ec_len)) {
		memcpy(ex, cex, sizeof(struct ext4_ext_cache));
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = 1;
	}
errout:
	if (!ret)
		sbi->extent_cache_misses++;
	else
		sbi->extent_cache_hits++;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}

/*
 * ext4_ext_in_cache()
 * Checks to see if the given block is in the cache.
 * If it is, the cached extent is stored in the given
 * extent pointer.
 *
 * @inode: The files inode
 * @block: The block to look for in the cache
 * @ex:    Pointer where the cached extent will be stored
 *         if it contains block
 *
 * Return 0 if cache is invalid; 1 if the cache is valid
 */
static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache cex;
	int ret = 0;

	if (ext4_ext_check_cache(inode, block, &cex)) {
		ex->ee_block = cpu_to_le32(cex.ec_block);
		ext4_ext_store_pblock(ex, cex.ec_start);
		ex->ee_len = cpu_to_le16(cex.ec_len);
		ret = 1;
	}

	return ret;
}


/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EIO;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;

	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
		len *= sizeof(struct ext4_extent_idx);
		memmove(path->p_idx, path->p_idx + 1, len);
	}

	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return err;
}

/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the max. credits needed to insert an extent
 * into the extent tree.
 * When passing the actual path, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 *  There is some space in the leaf, so there is no
			 *  need to account for the leaf block credit;
			 *
			 *  bitmaps and block group descriptor blocks
			 *  and other metadata blocks still need to be
			 *  accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}

/*
 * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
 *
 * If nrblocks fit in a single extent (chunk flag is 1), then in the worst
 * case each tree level's index/leaf block needs to be changed, and if the
 * tree splits due to inserting a new extent, the old index/leaf blocks
 * need to be updated too.
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int index;
	int depth = ext_depth(inode);

	if (chunk)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}
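
/*
 * Worked example (illustrative only): with a tree of depth 2, a single
 * contiguous chunk (chunk == 1) is charged index = 2 * 2 = 4 metadata
 * blocks, while a discontiguous request (chunk == 0) is charged
 * index = 3 * 2 = 6 to allow for an extra split at each level.
 */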

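/*
 * ext4_remove_blocks:
 * Release the physical blocks of @ex that fall in the logical range
 * [from, to]; the range must touch either the head or the tail of the
 * extent (or cover it completely), otherwise the request is only logged.
 */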
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				ext4_lblk_t from, ext4_lblk_t to)
{
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int flags = EXT4_FREE_BLOCKS_FORGET;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		flags |= EXT4_FREE_BLOCKS_METADATA;
#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext4_ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, NULL, start, num, flags);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* head removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = to - from;
		start = ext4_ext_pblock(ex);

		ext_debug("free first %u blocks starting %llu\n", num, start);
		ext4_free_blocks(handle, inode, 0, start, num, flags);

	} else {
		printk(KERN_INFO "strange request: removal(2) "
				"%u-%u from %u:%u\n",
				from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}


/*
 * ext4_ext_rm_leaf() Removes the extents associated with the
 * blocks appearing between "start" and "end", and splits the extents
 * if "start" and "end" appear in the same extent
 *
 * @handle: The journal handle
 * @inode:  The files inode
 * @path:   The path to the leaf
 * @start:  The first block to remove
 * @end:   The last block to remove
 */
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t start,
		ext4_lblk_t end)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b, block;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;
	struct ext4_map_blocks map;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}
	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		else
			uninitialized = 0;

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			 uninitialized, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block+ex_ee_len - 1 < end ?
			ex_ee_block+ex_ee_len - 1 : end;

		ext_debug("  border %u:%u\n", a, b);

		/* If this extent is beyond the end of the hole, skip it */
		if (end <= ex_ee_block) {
			ex--;
			ex_ee_block = le32_to_cpu(ex->ee_block);
			ex_ee_len = ext4_ext_get_actual_len(ex);
			continue;
		} else if (a != ex_ee_block &&
			b != ex_ee_block + ex_ee_len - 1) {
			/*
			 * If this is a truncate, then this condition should
			 * never happen because at least one of the end points
			 * needs to be on the edge of the extent.
			 */
			if (end == EXT_MAX_BLOCKS - 1) {
				ext_debug("  bad truncate %u:%u\n",
						start, end);
				block = 0;
				num = 0;
				err = -EIO;
				goto out;
			}
			/*
			 * else this is a hole punch, so the extent needs to
			 * be split since neither edge of the hole is on the
			 * extent edge
			 */
			else{
				map.m_pblk = ext4_ext_pblock(ex);
				map.m_lblk = ex_ee_block;
				map.m_len = b - ex_ee_block;

				err = ext4_split_extent(handle,
					inode, path, &map, 0,
					EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
					EXT4_GET_BLOCKS_PRE_IO);

				if (err < 0)
					goto out;

				ex_ee_len = ext4_ext_get_actual_len(ex);

				b = ex_ee_block+ex_ee_len - 1 < end ?
					ex_ee_block+ex_ee_len - 1 : end;

				/* Then remove tail of this extent */
				block = ex_ee_block;
				num = a - block;
			}
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = b;
			num =  ex_ee_block + ex_ee_len - b;

			/*
			 * If this is a truncate, this condition
			 * should never happen
			 */
			if (end == EXT_MAX_BLOCKS - 1) {
				ext_debug("  bad truncate %u:%u\n",
					start, end);
				err = -EIO;
				goto out;
			}
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			if (a != ex_ee_block) {
				ext_debug("  bad truncate %u:%u\n",
					start, end);
				err = -EIO;
				goto out;
			}

			if (b != ex_ee_block + ex_ee_len - 1) {
				ext_debug("  bad truncate %u:%u\n",
					start, end);
				err = -EIO;
				goto out;
			}
		}

		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
		} else if (block != ex_ee_block) {
			/*
			 * If this was a head removal, then we need to update
			 * the physical block since it is now at a different
			 * location
			 */
			ext4_ext_store_pblock(ex, ext4_ext_pblock(ex) + (b-a));
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
		if (num == 0) {
			if (end != EXT_MAX_BLOCKS - 1) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we dont have blank extents in the middle
				 */
				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
					sizeof(struct ext4_extent));

				/* Now get rid of the one at the end */
				memset(EXT_LAST_EXTENT(eh), 0,
					sizeof(struct ext4_extent));
			}
			le16_add_cpu(&eh->eh_entries, -1);
		}

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}

/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}

static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i, err;

	ext_debug("truncate since %u\n", start);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

again:
	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_depth = depth;
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	i = err = 0;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path,
					start, EXT_MAX_BLOCKS - 1);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check(inode, ext_block_hdr(bh),
							depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by the
				 * truncatei_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (err == -EAGAIN)
		goto again;
	ext4_journal_stop(handle);

	return err;
}

/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}

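/*
 * ext4_ext_zeroout:
 * Write zeroes over the on-disk blocks covered by @ex.  Used when an
 * uninitialized extent is converted in place (or a split fails with
 * ENOSPC) and it is cheaper to zero the data than to grow the tree.
 */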
/* FIXME!! we need to try to merge to left or right after zero-out  */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;
	int ret;

	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */

/*
 * ext4_split_extent_at() splits an extent at given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flags: indicates if the extent could be zeroout if split fails, and
 *		 the states(init or uninit) of new extents.
 * @flags: flags used to insert new extent to extent tree.
 *
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
 * states of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> no split is needed, and the extent is only marked.
 *
 * return 0 on success.
 */
static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags)
{
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex, newex, orig_ex;
	struct ext4_extent *ex2 = NULL;
	unsigned int ee_len, depth;
	int err = 0;

	ext_debug("ext4_split_extents_at: inode %lu, logical"
		"block %llu\n", inode->i_ino, (unsigned long long)split);

	ext4_ext_show_leaf(inode, path);

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);

	BUG_ON(split < ee_block || split >= (ee_block + ee_len));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;

	if (split == ee_block) {
		/*
		 * case b: block @split is the block that the extent begins with
		 * then we just change the state of the extent, and splitting
		 * is not needed.
		 */
		if (split_flag & EXT4_EXT_MARK_UNINIT2)
			ext4_ext_mark_uninitialized(ex);
		else
			ext4_ext_mark_initialized(ex);

		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
			ext4_ext_try_to_merge(inode, path, ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	}

	/* case a */
	memcpy(&orig_ex, ex, sizeof(orig_ex));
	ex->ee_len = cpu_to_le16(split - ee_block);
	if (split_flag & EXT4_EXT_MARK_UNINIT1)
		ext4_ext_mark_uninitialized(ex);

	/*
	 * path may lead to new leaf, not to original leaf any more
	 * after ext4_ext_insert_extent() returns,
	 */
	err = ext4_ext_dirty(handle, inode, path + depth);
	if (err)
		goto fix_extent_len;

	ex2 = &newex;
	ex2->ee_block = cpu_to_le32(split);
	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
	ext4_ext_store_pblock(ex2, newblock);
	if (split_flag & EXT4_EXT_MARK_UNINIT2)
		ext4_ext_mark_uninitialized(ex2);

	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		err = ext4_ext_zeroout(inode, &orig_ex);
		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_len = cpu_to_le16(ee_len);
		ext4_ext_try_to_merge(inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	} else if (err)
		goto fix_extent_len;

out:
	ext4_ext_show_leaf(inode, path);
	return err;

fix_extent_len:
	ex->ee_len = orig_ex.ee_len;
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}

/*
 * ext4_split_extent() splits an extent and marks the extent covered
 * by @map as split_flag indicates.
 *
 * It may result in splitting the extent into multiple extents (up to three).
 * There are three possibilities:
 *   a> There is no split required
 *   b> Splits into two extents: Split happens at either end of the extent
 *   c> Splits into three extents: Someone is splitting in the middle of the extent
 *
 */
static int ext4_split_extent(handle_t *handle,
			      struct inode *inode,
			      struct ext4_ext_path *path,
			      struct ext4_map_blocks *map,
			      int split_flag,
			      int flags)
{
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len, depth;
	int err = 0;
	int uninitialized;
	int split_flag1, flags1;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	uninitialized = ext4_ext_is_uninitialized(ex);

	if (map->m_lblk + map->m_len < ee_block + ee_len) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
		if (uninitialized)
			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
				       EXT4_EXT_MARK_UNINIT2;
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk + map->m_len, split_flag1, flags1);
		if (err)
			goto out;
	}

	ext4_ext_drop_refs(path);
	path = ext4_ext_find_extent(inode, map->m_lblk, path);
	if (IS_ERR(path))
		return PTR_ERR(path);

	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
			      EXT4_EXT_MAY_ZEROOUT : 0;
		if (uninitialized)
			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
		if (split_flag & EXT4_EXT_MARK_UNINIT2)
			split_flag1 |= EXT4_EXT_MARK_UNINIT2;
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk, split_flag1, flags);
		if (err)
			goto out;
	}

	ext4_ext_show_leaf(inode, path);
out:
	return err ? err : map->m_len;
}

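/*
 * EXT4_EXT_ZERO_LEN is the threshold (in blocks) below which it is
 * considered cheaper to zero out an uninitialized extent in place than
 * to split it when only part of it is written; see
 * ext4_ext_convert_to_initialized() below.
 */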
#define EXT4_EXT_ZERO_LEN 7
/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits into two extents: Write happens at either end of the extent
 *   c> Splits into three extents: Someone is writing in the middle of the extent
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path *path)
{
	struct ext4_map_blocks split_map;
	struct ext4_extent zero_ex;
	struct ext4_extent *ex;
	ext4_lblk_t ee_block, eof_block;
	unsigned int allocated, ee_len, depth;
	int err = 0;
	int split_flag = 0;

	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (map->m_lblk - ee_block);

	WARN_ON(map->m_lblk < ee_block);
	/*
	 * It is safe to convert the extent to initialized via explicit
	 * zeroout only if the extent is fully inside i_size or new_size.
	 */
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;

	/* If the extent has less than 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
	if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
	    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		err = ext4_ext_zeroout(inode, ex);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;
		ext4_ext_mark_initialized(ex);
		ext4_ext_try_to_merge(inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + depth);
		goto out;
	}

	/*
	 * four cases:
	 * 1. split the extent into three extents.
	 * 2. split the extent into two extents, zeroout the first half.
	 * 3. split the extent into two extents, zeroout the second half.
	 * 4. split the extent into two extents without zeroout.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;

	if (allocated > map->m_len) {
		if (allocated <= EXT4_EXT_ZERO_LEN &&
		    (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
			/* case 3 */
			zero_ex.ee_block =
					 cpu_to_le32(map->m_lblk);
			zero_ex.ee_len = cpu_to_le16(allocated);
			ext4_ext_store_pblock(&zero_ex,
				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex);
			if (err)
				goto out;
			split_map.m_lblk = map->m_lblk;
			split_map.m_len = allocated;
		} else if ((map->m_lblk - ee_block + map->m_len <
			   EXT4_EXT_ZERO_LEN) &&
			   (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
			/* case 2 */
			if (map->m_lblk != ee_block) {
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
							ee_block);
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex);
				if (err)
					goto out;
			}

			split_map.m_lblk = ee_block;
			split_map.m_len = map->m_lblk - ee_block + map->m_len;
			allocated = map->m_len;
		}
	}

	allocated = ext4_split_extent(handle, inode, path,
				       &split_map, split_flag, 0);
	if (allocated < 0)
		err = allocated;

out:
	return err ? err : allocated;
}

/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when DIO to write
 * to an uninitialized extent.
 *
 * Writing to an uninitialized extent may result in splitting the uninitialized
 * extent into multiple initialized/uninitialized extents (up to three).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be uninitialized
 *   b> Splits into two extents: Write happens at either end of the extent
 *   c> Splits into three extents: Someone is writing in the middle of the extent
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the uninitialized extent is split. To prevent ENOSPC from occurring at IO
 * completion, we split the uninitialized extent before the DIO is submitted.
 * The split performed here produces at most three uninitialized extents.
 * After IO completes, the part that was filled is converted to initialized
 * by the end_io callback via ext4_convert_unwritten_extents().
 *
 * Returns the size of the uninitialized extent to be written on success.
 */
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
3042
					struct ext4_map_blocks *map,
3043 3044 3045
					struct ext4_ext_path *path,
					int flags)
{
3046 3047 3048 3049 3050
	ext4_lblk_t eof_block;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len;
	int split_flag = 0, depth;
3051 3052 3053

	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
3054
		(unsigned long long)map->m_lblk, map->m_len);
3055 3056 3057

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
3058 3059
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
3060 3061 3062 3063
	/*
	 * It is safe to convert extent to initialized via explicit
	 * zeroout only if extent is fully insde i_size or new_size.
	 */
3064 3065 3066 3067
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
3068

3069 3070
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
	split_flag |= EXT4_EXT_MARK_UNINIT2;
3071

3072 3073
	flags |= EXT4_GET_BLOCKS_PRE_IO;
	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3074
}
3075

3076
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3077 3078 3079 3080 3081 3082 3083 3084 3085 3086
					      struct inode *inode,
					      struct ext4_ext_path *path)
{
	struct ext4_extent *ex;
	int depth;
	int err = 0;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;

3087 3088 3089 3090 3091
	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)le32_to_cpu(ex->ee_block),
		ext4_ext_get_actual_len(ex));

3092 3093 3094 3095 3096 3097
	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

3098 3099
	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
3100
	 */
3101 3102
	ext4_ext_try_to_merge(inode, path, ex);

3103 3104 3105 3106 3107 3108 3109
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}

3110 3111 3112 3113 3114 3115 3116 3117
static void unmap_underlying_metadata_blocks(struct block_device *bdev,
			sector_t block, int count)
{
	int i;
	for (i = 0; i < count; i++)
                unmap_underlying_metadata(bdev, block + i);
}

T
Theodore Ts'o 已提交
3118 3119 3120 3121
/*
 * Handle EOFBLOCKS_FL flag, clearing it if necessary
 */
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3122
			      ext4_lblk_t lblk,
T
Theodore Ts'o 已提交
3123 3124 3125 3126 3127
			      struct ext4_ext_path *path,
			      unsigned int len)
{
	int i, depth;
	struct ext4_extent_header *eh;
3128
	struct ext4_extent *last_ex;
T
Theodore Ts'o 已提交
3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
		return 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

	if (unlikely(!eh->eh_entries)) {
		EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
				 "EOFBLOCKS_FL set");
		return -EIO;
	}
	last_ex = EXT_LAST_EXTENT(eh);
	/*
	 * We should clear the EOFBLOCKS_FL flag if we are writing the
	 * last block in the last extent in the file.  We test this by
	 * first checking to see if the caller to
	 * ext4_ext_get_blocks() was interested in the last block (or
	 * a block beyond the last block) in the current extent.  If
	 * this turns out to be false, we can bail out from this
	 * function immediately.
	 */
3151
	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
T
Theodore Ts'o 已提交
3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167
	    ext4_ext_get_actual_len(last_ex))
		return 0;
	/*
	 * If the caller does appear to be planning to write at or
	 * beyond the end of the current extent, we then test to see
	 * if the current extent is the last extent in the file, by
	 * checking to make sure it was reached via the rightmost node
	 * at each level of the tree.
	 */
	for (i = depth-1; i >= 0; i--)
		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
			return 0;
	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	return ext4_mark_inode_dirty(handle, inode);
}

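/*
 * ext4_ext_handle_uninitialized_extents:
 * Dispatcher for I/O that lands in an uninitialized (preallocated)
 * extent: split before submission for pre-IO/DIO, convert the filled
 * range to initialized at IO completion, report the extent as unwritten
 * for plain lookups, and fall back to an in-place conversion (possibly
 * via zeroout) for buffered writeback.
 */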
static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3170
			struct ext4_map_blocks *map,
3171
			struct ext4_ext_path *path, int flags,
3172
			unsigned int allocated, ext4_fsblk_t newblock)
3173 3174 3175
{
	int ret = 0;
	int err = 0;
3176
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3177 3178 3179

	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
		  "block %llu, max_blocks %u, flags %d, allocated %u",
3180
		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3181 3182 3183
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

3184
	/* get_block() before submit the IO, split the extent */
3185
	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3186 3187
		ret = ext4_split_unwritten_extents(handle, inode, map,
						   path, flags);
3188 3189
		/*
		 * Flag the inode(non aio case) or end_io struct (aio case)
L
Lucas De Marchi 已提交
3190
		 * that this IO needs to conversion to written when IO is
3191 3192
		 * completed
		 */
3193
		if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
3194
			io->flag = EXT4_IO_END_UNWRITTEN;
3195 3196
			atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
		} else
3197
			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3198
		if (ext4_should_dioread_nolock(inode))
3199
			map->m_flags |= EXT4_MAP_UNINIT;
3200 3201
		goto out;
	}
3202
	/* IO end_io complete, convert the filled extent to written */
3203
	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3204
		ret = ext4_convert_unwritten_extents_endio(handle, inode,
3205
							path);
T
Theodore Ts'o 已提交
3206
		if (ret >= 0) {
3207
			ext4_update_inode_fsync_trans(handle, inode, 1);
3208 3209
			err = check_eofblocks_fl(handle, inode, map->m_lblk,
						 path, map->m_len);
T
Theodore Ts'o 已提交
3210 3211
		} else
			err = ret;
3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230
		goto out2;
	}
	/* buffered IO case */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
		goto map_out;

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already.  We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us.  But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
3231
		map->m_flags |= EXT4_MAP_UNWRITTEN;
3232 3233 3234 3235
		goto out1;
	}

	/* buffered write, writepage time, convert*/
3236
	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
T
Theodore Ts'o 已提交
3237
	if (ret >= 0) {
3238
		ext4_update_inode_fsync_trans(handle, inode, 1);
3239 3240
		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
					 map->m_len);
T
Theodore Ts'o 已提交
3241 3242 3243 3244
		if (err < 0)
			goto out2;
	}

3245 3246 3247 3248 3249 3250
out:
	if (ret <= 0) {
		err = ret;
		goto out2;
	} else
		allocated = ret;
3251
	map->m_flags |= EXT4_MAP_NEW;
3252 3253 3254 3255 3256 3257 3258
	/*
	 * if we allocated more blocks than requested
	 * we need to make sure we unmap the extra block
	 * allocated. The actual needed block will get
	 * unmapped later when we find the buffer_head marked
	 * new.
	 */
3259
	if (allocated > map->m_len) {
3260
		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3261 3262 3263
					newblock + map->m_len,
					allocated - map->m_len);
		allocated = map->m_len;
3264
	}
3265 3266 3267 3268 3269 3270 3271 3272

	/*
	 * If we have done fallocate with the offset that is already
	 * delayed allocated, we would have block reservation
	 * and quota reservation done in the delayed write path.
	 * But fallocate would have already updated quota and block
	 * count for this offset. So cancel these reservation
	 */
3273
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3274 3275
		ext4_da_update_reserve_space(inode, allocated, 0);

3276
map_out:
3277
	map->m_flags |= EXT4_MAP_MAPPED;
3278
out1:
3279 3280
	if (allocated > map->m_len)
		allocated = map->m_len;
3281
	ext4_ext_show_leaf(inode, path);
3282 3283
	map->m_pblk = newblock;
	map->m_len = allocated;
3284 3285 3286 3287 3288 3289 3290
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}
T
Theodore Ts'o 已提交
3291

3292
/*
3293 3294 3295
 * Block allocation/map/preallocation routine for extents based files
 *
 *
3296
 * Need to be called with
3297 3298
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3299 3300 3301 3302 3303 3304 3305 3306 3307 3308
 *
 * return > 0, number of of blocks already mapped/allocated
 *          if create == 0 and these are pre-allocated blocks
 *          	buffer head is unmapped
 *          otherwise blocks are mapped
 *
 * return = 0, if plain look up failed (blocks have not been allocated)
 *          buffer head is unmapped
 *
 * return < 0, error case.
3309
 */
3310 3311
int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map, int flags)
A
Alex Tomas 已提交
3312 3313
{
	struct ext4_ext_path *path = NULL;
T
Theodore Ts'o 已提交
3314
	struct ext4_extent newex, *ex;
3315
	ext4_fsblk_t newblock = 0;
3316
	int err = 0, depth, ret;
3317
	unsigned int allocated = 0;
3318 3319
	unsigned int punched_out = 0;
	unsigned int result = 0;
3320
	struct ext4_allocation_request ar;
3321
	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3322
	struct ext4_map_blocks punch_map;
A
Alex Tomas 已提交
3323

3324
	ext_debug("blocks %u/%u requested for inode %lu\n",
3325
		  map->m_lblk, map->m_len, inode->i_ino);
3326
	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
A
Alex Tomas 已提交
3327 3328

	/* check in cache */
3329 3330
	if (!(flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) &&
		ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
3331
		if (!newex.ee_start_lo && !newex.ee_start_hi) {
3332
			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3333 3334 3335 3336
				/*
				 * block isn't allocated yet and
				 * user doesn't want to allocate it
				 */
A
Alex Tomas 已提交
3337 3338 3339
				goto out2;
			}
			/* we should allocate requested block */
3340
		} else {
A
Alex Tomas 已提交
3341
			/* block is already allocated */
3342
			newblock = map->m_lblk
D
Dave Kleikamp 已提交
3343
				   - le32_to_cpu(newex.ee_block)
3344
				   + ext4_ext_pblock(&newex);
3345
			/* number of remaining blocks in the extent */
3346
			allocated = ext4_ext_get_actual_len(&newex) -
3347
				(map->m_lblk - le32_to_cpu(newex.ee_block));
A
Alex Tomas 已提交
3348 3349 3350 3351 3352
			goto out;
		}
	}

	/* find extent for this block */
3353
	path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
A
Alex Tomas 已提交
3354 3355 3356 3357 3358 3359 3360 3361 3362
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
3363 3364
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
A
Alex Tomas 已提交
3365 3366
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
3367 3368
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode, "bad extent address "
3369 3370 3371
				 "lblock: %lu, depth: %d pblock %lld",
				 (unsigned long) map->m_lblk, depth,
				 path[depth].p_block);
3372 3373 3374
		err = -EIO;
		goto out2;
	}
A
Alex Tomas 已提交
3375

3376 3377
	ex = path[depth].p_ext;
	if (ex) {
A
Aneesh Kumar K.V 已提交
3378
		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3379
		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
A
Amit Arora 已提交
3380
		unsigned short ee_len;
3381 3382 3383

		/*
		 * Uninitialized extents are treated as holes, except that
3384
		 * we split out initialized portions during a write.
3385
		 */
A
Amit Arora 已提交
3386
		ee_len = ext4_ext_get_actual_len(ex);
3387
		/* if found extent covers block, simply return it */
3388 3389
		if (in_range(map->m_lblk, ee_block, ee_len)) {
			newblock = map->m_lblk - ee_block + ee_start;
3390
			/* number of remaining blocks in the extent */
3391 3392 3393
			allocated = ee_len - (map->m_lblk - ee_block);
			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
				  ee_block, ee_len, newblock);
3394

3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408
			if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) {
				/*
				 * Do not put uninitialized extent
				 * in the cache
				 */
				if (!ext4_ext_is_uninitialized(ex)) {
					ext4_ext_put_in_cache(inode, ee_block,
						ee_len, ee_start);
					goto out;
				}
				ret = ext4_ext_handle_uninitialized_extents(
					handle, inode, map, path, flags,
					allocated, newblock);
				return ret;
3409
			}
3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468

			/*
			 * Punch out the map length, but only to the
			 * end of the extent
			 */
			punched_out = allocated < map->m_len ?
				allocated : map->m_len;

			/*
			 * Since extents need to be converted to
			 * uninitialized, they must fit in an
			 * uninitialized extent
			 */
			if (punched_out > EXT_UNINIT_MAX_LEN)
				punched_out = EXT_UNINIT_MAX_LEN;

			punch_map.m_lblk = map->m_lblk;
			punch_map.m_pblk = newblock;
			punch_map.m_len = punched_out;
			punch_map.m_flags = 0;

			/* Check to see if the extent needs to be split */
			if (punch_map.m_len != ee_len ||
				punch_map.m_lblk != ee_block) {

				ret = ext4_split_extent(handle, inode,
				path, &punch_map, 0,
				EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
				EXT4_GET_BLOCKS_PRE_IO);

				if (ret < 0) {
					err = ret;
					goto out2;
				}
				/*
				 * find extent for the block at
				 * the start of the hole
				 */
				ext4_ext_drop_refs(path);
				kfree(path);

				path = ext4_ext_find_extent(inode,
				map->m_lblk, NULL);
				if (IS_ERR(path)) {
					err = PTR_ERR(path);
					path = NULL;
					goto out2;
				}

				depth = ext_depth(inode);
				ex = path[depth].p_ext;
				ee_len = ext4_ext_get_actual_len(ex);
				ee_block = le32_to_cpu(ex->ee_block);
				ee_start = ext4_ext_pblock(ex);

			}

			ext4_ext_mark_uninitialized(ex);

			ext4_ext_invalidate_cache(inode);

			err = ext4_ext_rm_leaf(handle, inode, path,
				map->m_lblk, map->m_lblk + punched_out);

			if (!err && path->p_hdr->eh_entries == 0) {
				/*
				 * Punch hole freed all of this sub tree,
				 * so we need to correct eh_depth
				 */
				err = ext4_ext_get_access(handle, inode, path);
				if (err == 0) {
					ext_inode_hdr(inode)->eh_depth = 0;
					ext_inode_hdr(inode)->eh_max =
					cpu_to_le16(ext4_ext_space_root(
						inode, 0));

					err = ext4_ext_dirty(
						handle, inode, path);
				}
			}

			goto out2;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we shouldn't try to create block if create flag is zero
	 */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * put just found gap into cache to speed up
		 * subsequent requests
		 */
		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
		goto out2;
	}
	/*
	 * Okay, we need to do block allocation.
	 */

	/* find neighbour allocated blocks */
	ar.lleft = map->m_lblk;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	if (err)
		goto out2;
	ar.lright = map->m_lblk;
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
	if (err)
		goto out2;

	/*
	 * See if request is beyond maximum number of blocks we can have in
	 * a single extent. For an initialized extent this limit is
	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
	 * EXT_UNINIT_MAX_LEN.
	 */
	if (map->m_len > EXT_INIT_MAX_LEN &&
	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		map->m_len = EXT_INIT_MAX_LEN;
	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		map->m_len = EXT_UNINIT_MAX_LEN;

	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
	newex.ee_block = cpu_to_le32(map->m_lblk);
	newex.ee_len = cpu_to_le16(map->m_len);
	err = ext4_ext_check_overlap(inode, &newex, path);
	if (err)
		allocated = ext4_ext_get_actual_len(&newex);
	else
		allocated = map->m_len;

	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
	ar.logical = map->m_lblk;
	ar.len = allocated;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);

	/* try to insert new extent into found leaf and return */
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(ar.len);
	/* Mark uninitialized */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
		ext4_ext_mark_uninitialized(&newex);
		/*
		 * io_end structure was created for every IO write to an
		 * uninitialized extent. To avoid unnecessary conversion,
		 * here we flag the IO that really needs the conversion.
		 * For non-async direct IO case, flag the inode state
		 * that we need to perform conversion when IO is done.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
			if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
				io->flag = EXT4_IO_END_UNWRITTEN;
				atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
			} else
				ext4_set_inode_state(inode,
						     EXT4_STATE_DIO_UNWRITTEN);
		}
		if (ext4_should_dioread_nolock(inode))
			map->m_flags |= EXT4_MAP_UNINIT;
	}

	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
	if (!err)
		err = ext4_ext_insert_extent(handle, inode, path,
					     &newex, flags);
	if (err) {
		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
		/* free data blocks we just allocated */
		/* not a good idea to call discard here directly,
		 * but otherwise we'd need to call it every free() */
		ext4_discard_preallocations(inode);
		ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
				 ext4_ext_get_actual_len(&newex), fb_flags);
		goto out2;
	}

	/* previous routine could use block we allocated */
	newblock = ext4_ext_pblock(&newex);
	allocated = ext4_ext_get_actual_len(&newex);
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_flags |= EXT4_MAP_NEW;

	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_da_update_reserve_space(inode, allocated, 1);

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an uninitialized extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
		ext4_update_inode_fsync_trans(handle, inode, 1);
	} else
		ext4_update_inode_fsync_trans(handle, inode, 0);
out:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
		newblock, map->m_len, err ? err : allocated);

	result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ?
			punched_out : allocated;

	return err ? err : result;
}
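
/*
 * ext4_ext_truncate() below (summary comment added for clarity): flush any
 * pending end_io conversions, zero the partial tail block, then take
 * i_data_sem and remove every extent beyond the new i_size from the tree.
 */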

void ext4_ext_truncate(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * finish any pending end_io work so we won't run the risk of
	 * converting any truncated blocks to initialized later
	 */
	ext4_flush_completed_IO(inode);

	/*
	 * probably first extent we're gonna free will be last in block
	 */
	err = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle))
		return;

	if (inode->i_size & (sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);

	ext4_discard_preallocations(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
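	/*
	 * (note added for clarity) last_block is the first logical block
	 * past the new EOF (i_size rounded up to a block boundary); every
	 * extent from that block onwards is dropped from the tree.
	 */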
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous.
	 */
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	up_write(&EXT4_I(inode)->i_data_sem);

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}
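
/*
 * ext4_falloc_update_inode() below (summary comment added for clarity):
 * after a successful preallocation pass, push i_size/i_disksize forward when
 * the caller did not ask for FALLOC_FL_KEEP_SIZE, or set EXT4_INODE_EOFBLOCKS
 * when blocks were allocated beyond EOF while the size is kept.
 */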

static void ext4_falloc_update_inode(struct inode *inode,
				int mode, loff_t new_size, int update_ctime)
{
	struct timespec now;

	if (update_ctime) {
		now = current_fs_time(inode->i_sb);
		if (!timespec_equal(&inode->i_ctime, &now))
			inode->i_ctime = now;
	}
	/*
	 * Update only when preallocation was requested beyond
	 * the file size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (new_size > EXT4_I(inode)->i_disksize)
			ext4_update_i_disksize(inode, new_size);
	} else {
		/*
		 * Mark that we allocate beyond EOF so the subsequent truncate
		 * can proceed even if the new size is the same as i_size.
		 */
		if (new_size > i_size_read(inode))
			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	}

}

/*
 * preallocate space for a file. This implements ext4's fallocate file
 * operation, which gets called from sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support fallocate() system call).
 */
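/*
 * Illustrative example (added): fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, len)
 * preallocates len bytes as uninitialized extents without touching i_size,
 * while mode == 0 also moves i_size out to offset + len.
 */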
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	handle_t *handle;
	loff_t new_size;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	/*
	 * currently supporting (pre)allocate mode for extent-based
	 * files _only_
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EOPNOTSUPP;

	/* Return error if mode is not supported */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return ext4_punch_hole(file, offset, len);

	trace_ext4_fallocate_enter(inode, offset, len, mode);
	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because a partial block
	 * may be covered at each end: e.g. with blocksize = 4096,
	 * offset = 3072 and len = 2048 the request spans two blocks.
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
		- map.m_lblk;
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, (len + offset));
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
		return ret;
	}
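
	/*
	 * (note added for clarity) The range is allocated in chunks: each
	 * loop iteration maps up to max_blocks uninitialized blocks in its
	 * own transaction, and on ENOSPC the whole loop is retried while
	 * ext4_should_retry_alloc() thinks a retry can succeed.
	 */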
retry:
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk = map.m_lblk + ret;
		map.m_len = max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_CREATE_UNINIT_EXT |
				      EXT4_GET_BLOCKS_NO_NORMALIZE);
		if (ret <= 0) {
#ifdef EXT4FS_DEBUG
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_map_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, map.m_lblk, max_blocks);
#endif
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
						blkbits) >> blkbits))
			new_size = offset + len;
		else
			new_size = ((loff_t) map.m_lblk + ret) << blkbits;

		ext4_falloc_update_inode(inode, mode, new_size,
					 (map.m_flags & EXT4_MAP_NEW));
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
			ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}
	mutex_unlock(&inode->i_mutex);
	trace_ext4_fallocate_exit(inode, offset, max_blocks,
				ret > 0 ? ret2 : ret);
	return ret > 0 ? ret2 : ret;
}

/*
 * This function converts a range of blocks to written extents.
 * The caller of this function will pass the start offset and the size.
 * All unwritten extents within this range will be converted to
 * written extents.
 *
 * This function is called from the direct IO end io call back
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
				    ssize_t len)
{
	handle_t *handle;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because a partial block
	 * may be covered at each end: e.g. with blocksize = 4096,
	 * offset = 3072 and len = 2048 the request spans two blocks.
	 */
	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
		      map.m_lblk);
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0) {
			WARN_ON(ret <= 0);
			printk(KERN_ERR "%s: ext4_ext_map_blocks "
				    "returned error inode#%lu, block=%u, "
				    "max_blocks=%u", __func__,
				    inode->i_ino, map.m_lblk, map.m_len);
		}
		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2 )
			break;
	}
	return ret > 0 ? ret2 : ret;
}

/*
 * Callback function called for each extent to gather FIEMAP information.
 */
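/*
 * (note added for clarity) Invoked from ext4_ext_walk_space() for every
 * extent or gap; when newex->ec_start is zero the range has no on-disk
 * extent, so the page cache is probed for delayed-allocation buffers.
 * Returns EXT_CONTINUE to keep walking, EXT_BREAK to stop early, or a
 * negative error.
 */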
static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
		       struct ext4_ext_cache *newex, struct ext4_extent *ex,
		       void *data)
{
	__u64	logical;
	__u64	physical;
	__u64	length;
	__u32	flags = 0;
	int		ret = 0;
	struct fiemap_extent_info *fieinfo = data;
	unsigned char blksize_bits;

	blksize_bits = inode->i_sb->s_blocksize_bits;
	logical = (__u64)newex->ec_block << blksize_bits;

	if (newex->ec_start == 0) {
		/*
		 * No extent in extent-tree contains block @newex->ec_start,
		 * then the block may stay in 1)a hole or 2)delayed-extent.
		 *
		 * Holes or delayed-extents are processed as follows.
		 * 1. lookup dirty pages with specified range in pagecache.
		 *    If no page is got, then there is no delayed-extent and
		 *    return with EXT_CONTINUE.
		 * 2. find the 1st mapped buffer,
		 * 3. check if the mapped buffer is both in the request range
		 *    and a delayed buffer. If not, there is no delayed-extent,
		 *    then return.
		 * 4. a delayed-extent is found, the extent will be collected.
		 */
		ext4_lblk_t	end = 0;
		pgoff_t		last_offset;
		pgoff_t		offset;
		pgoff_t		index;
		pgoff_t		start_index = 0;
		struct page	**pages = NULL;
		struct buffer_head *bh = NULL;
		struct buffer_head *head = NULL;
		unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);

		pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (pages == NULL)
			return -ENOMEM;

		offset = logical >> PAGE_SHIFT;
repeat:
		last_offset = offset;
		head = NULL;
		ret = find_get_pages_tag(inode->i_mapping, &offset,
					PAGECACHE_TAG_DIRTY, nr_pages, pages);

		if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
			/* First time, try to find a mapped buffer. */
			if (ret == 0) {
out:
				for (index = 0; index < ret; index++)
					page_cache_release(pages[index]);
				/* just a hole. */
				kfree(pages);
				return EXT_CONTINUE;
			}
			index = 0;

next_page:
			/* Try to find the 1st mapped buffer. */
			end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
				  blksize_bits;
			if (!page_has_buffers(pages[index]))
				goto out;
			head = page_buffers(pages[index]);
			if (!head)
				goto out;

			index++;
			bh = head;
			do {
				if (end >= newex->ec_block +
					newex->ec_len)
					/* The buffer is out of
					 * the request range.
					 */
					goto out;

				if (buffer_mapped(bh) &&
				    end >= newex->ec_block) {
					start_index = index - 1;
					/* get the 1st mapped buffer. */
					goto found_mapped_buffer;
				}

				bh = bh->b_this_page;
				end++;
			} while (bh != head);

			/* No mapped buffer in the range found in this page,
			 * We need to look up next page.
			 */
			if (index >= ret) {
				/* There is no page left, but we need to limit
				 * newex->ec_len.
				 */
				newex->ec_len = end - newex->ec_block;
				goto out;
			}
			goto next_page;
		} else {
			/* Find contiguous delayed buffers. */
			if (ret > 0 && pages[0]->index == last_offset)
				head = page_buffers(pages[0]);
			bh = head;
			index = 1;
			start_index = 0;
		}

found_mapped_buffer:
		if (bh != NULL && buffer_delay(bh)) {
			/* 1st or contiguous delayed buffer found. */
			if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
				/*
				 * 1st delayed buffer found, record
				 * the start of extent.
				 */
				flags |= FIEMAP_EXTENT_DELALLOC;
				newex->ec_block = end;
				logical = (__u64)end << blksize_bits;
			}
			/* Find contiguous delayed buffers. */
			do {
				if (!buffer_delay(bh))
					goto found_delayed_extent;
				bh = bh->b_this_page;
				end++;
			} while (bh != head);

			for (; index < ret; index++) {
				if (!page_has_buffers(pages[index])) {
					bh = NULL;
					break;
				}
				head = page_buffers(pages[index]);
				if (!head) {
					bh = NULL;
					break;
				}

				if (pages[index]->index !=
				    pages[start_index]->index + index
				    - start_index) {
					/* Blocks are not contiguous. */
					bh = NULL;
					break;
				}
				bh = head;
				do {
					if (!buffer_delay(bh))
						/* Delayed-extent ends. */
						goto found_delayed_extent;
					bh = bh->b_this_page;
					end++;
				} while (bh != head);
			}
		} else if (!(flags & FIEMAP_EXTENT_DELALLOC))
			/* a hole found. */
			goto out;

found_delayed_extent:
		newex->ec_len = min(end - newex->ec_block,
						(ext4_lblk_t)EXT_INIT_MAX_LEN);
		if (ret == nr_pages && bh != NULL &&
			newex->ec_len < EXT_INIT_MAX_LEN &&
			buffer_delay(bh)) {
			/* Have not collected an extent and continue. */
			for (index = 0; index < ret; index++)
				page_cache_release(pages[index]);
			goto repeat;
		}

		for (index = 0; index < ret; index++)
			page_cache_release(pages[index]);
		kfree(pages);
	}

	physical = (__u64)newex->ec_start << blksize_bits;
	length =   (__u64)newex->ec_len << blksize_bits;

	if (ex && ext4_ext_is_uninitialized(ex))
		flags |= FIEMAP_EXTENT_UNWRITTEN;

	if (next == EXT_MAX_BLOCKS)
		flags |= FIEMAP_EXTENT_LAST;

	ret = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, flags);
	if (ret < 0)
		return ret;
	if (ret == 1)
		return EXT_BREAK;
	return EXT_CONTINUE;
}

/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
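
/*
 * ext4_xattr_fiemap() below (summary comment added for clarity): report the
 * inode's extended-attribute area as a single FIEMAP extent, either inside
 * the inode itself (EXT4_STATE_XATTR) or in the external block at i_file_acl.
 */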

static int ext4_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}

/*
 * ext4_ext_punch_hole
 *
 * Punches a hole of "length" bytes in a file starting
 * at byte "offset"
 *
 * @inode:  The inode of the file to punch a hole in
 * @offset: The starting byte offset of the hole
 * @length: The length of the hole
 *
 * Returns the number of blocks removed or negative on err
 */
int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct ext4_ext_cache cache_ex;
	ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
	struct address_space *mapping = inode->i_mapping;
	struct ext4_map_blocks map;
	handle_t *handle;
	loff_t first_block_offset, last_block_offset, block_len;
	loff_t first_page, last_page, first_page_offset, last_page_offset;
	int ret, credits, blocks_released, err = 0;

	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	first_block_offset = first_block << EXT4_BLOCK_SIZE_BITS(sb);
	last_block_offset = last_block << EXT4_BLOCK_SIZE_BITS(sb);

	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	last_page = (offset + length) >> PAGE_CACHE_SHIFT;

	first_page_offset = first_page << PAGE_CACHE_SHIFT;
	last_page_offset = last_page << PAGE_CACHE_SHIFT;
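	/*
	 * (note added for clarity) first_block is rounded up and last_block
	 * rounded down, so only blocks wholly inside the byte range are
	 * removed; the partial blocks at either end are zeroed further below.
	 */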

	/*
	 * Write out all dirty pages to avoid race conditions
	 * Then release them.
	 */
	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		err = filemap_write_and_wait_range(mapping,
			first_page_offset == 0 ? 0 : first_page_offset-1,
			last_page_offset);

		if (err)
			return err;
	}

	/* Now release the pages */
	if (last_page_offset > first_page_offset) {
		truncate_inode_pages_range(mapping, first_page_offset,
					   last_page_offset-1);
	}

	/* finish any pending end_io work */
	ext4_flush_completed_IO(inode);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_orphan_add(handle, inode);
	if (err)
		goto out;

	/*
	 * Now we need to zero out the non-block-aligned data.
	 * If the hole covers no complete block, just
	 * zero out the middle of the affected block
	 */
	if (first_block > last_block)
		ext4_block_zero_page_range(handle, mapping, offset, length);
	else {
		/* zero out the head of the hole before the first block */
		block_len  = first_block_offset - offset;
		if (block_len > 0)
			ext4_block_zero_page_range(handle, mapping,
						   offset, block_len);

		/* zero out the tail of the hole after the last block */
		block_len = offset + length - last_block_offset;
		if (block_len > 0) {
			ext4_block_zero_page_range(handle, mapping,
					last_block_offset, block_len);
		}
	}

	/* If there are no blocks to remove, return now */
	if (first_block >= last_block)
		goto out;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);
	ext4_discard_preallocations(inode);

	/*
	 * Loop over all the blocks and identify blocks
	 * that need to be punched out
	 */
	iblock = first_block;
	blocks_released = 0;
	while (iblock < last_block) {
		max_blocks = last_block - iblock;
		num_blocks = 1;
		memset(&map, 0, sizeof(map));
		map.m_lblk = iblock;
		map.m_len = max_blocks;
		ret = ext4_ext_map_blocks(handle, inode, &map,
			EXT4_GET_BLOCKS_PUNCH_OUT_EXT);

		if (ret > 0) {
			blocks_released += ret;
			num_blocks = ret;
		} else if (ret == 0) {
			/*
			 * If map blocks could not find the block,
			 * then it is in a hole.  If the hole was
			 * not already cached, then map blocks should
			 * put it in the cache.  So we can get the hole
			 * out of the cache
			 */
			memset(&cache_ex, 0, sizeof(cache_ex));
			if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
				!cache_ex.ec_start) {

				/* The hole is cached */
				num_blocks = cache_ex.ec_block +
				cache_ex.ec_len - iblock;

			} else {
				/* The block could not be identified */
				err = -EIO;
				break;
			}
		} else {
			/* Map blocks error */
			err = ret;
			break;
		}

		if (num_blocks == 0) {
			/* This condition should never happen */
			ext_debug("Block lookup failed");
			err = -EIO;
			break;
		}

		iblock += num_blocks;
	}

	if (blocks_released > 0) {
		ext4_ext_invalidate_cache(inode);
		ext4_discard_preallocations(inode);
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	up_write(&EXT4_I(inode)->i_data_sem);

out:
	ext4_orphan_del(handle, inode);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
	return err;
}
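
/*
 * ext4_fiemap() below (summary comment added for clarity): FIEMAP ioctl entry
 * point.  Block-mapped files fall back to generic_block_fiemap(), the XATTR
 * flag is handled by ext4_xattr_fiemap(), and everything else walks the
 * extent tree via ext4_ext_walk_space() with ext4_ext_fiemap_cb().
 */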
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	/* fallback to generic here if not in extents fmt */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCKS)
			last_blk = EXT_MAX_BLOCKS-1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information.
		 * ext4_ext_fiemap_cb will push extents back to user.
		 */
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					  ext4_ext_fiemap_cb, fieinfo);
	}

	return error;
}