/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a run of contiguous blocks that can be
 * represented by a single extent.
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

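/*
 * Flush the extent range accumulated in @lb into the temporary
 * inode's extent tree, extending or restarting the journal handle
 * as needed to keep enough credits.  A zero lb->first_pblock means
 * there is nothing to flush.
 */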
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;
	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on the temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate
	 * extra credits, but below we try not to accumulate
	 * too many of them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credits we accumulated are not too high.
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		up_write((&EXT4_I(inode)->i_data_sem));
		retval = ext4_journal_restart(handle, needed);
		down_write((&EXT4_I(inode)->i_data_sem));
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If not able to extend the journal, restart it
			 */
			up_write((&EXT4_I(inode)->i_data_sem));
			retval = ext4_journal_restart(handle, needed);
			down_write((&EXT4_I(inode)->i_data_sem));
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
	up_write((&EXT4_I(inode)->i_data_sem));
	ext4_ext_drop_refs(path);
	kfree(path);
	lb->first_pblock = 0;
	return retval;
}

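/*
 * Account one mapped block: either extend the range currently being
 * accumulated in @lb (when both the logical and physical blocks are
 * contiguous with it), or flush that range via finish_range() and
 * start a new one at @pblock.
 */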
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

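/*
 * Walk an indirect block, feeding every mapped entry to
 * update_extent_range(); holes just advance lb->curr_block.
 */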
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

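/*
 * Walk a double indirect block, recursing into each indirect block
 * it references; an empty entry skips an indirect block's worth of
 * file blocks.
 */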
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

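/*
 * Walk a triple indirect block, recursing into each double indirect
 * block it references; an empty entry skips max_entries * max_entries
 * file blocks.
 */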
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

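/*
 * Make sure the handle has enough credits left to free a block,
 * extending or restarting the journal if it does not.
 */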
static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing blocks. During this we touch the
	 * superblock, group descriptor and block bitmap,
	 * so allocate credits for 3 blocks. We may also
	 * update quota (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}

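/*
 * Free the indirect (meta-data) blocks referenced from a double
 * indirect block, and then the double indirect block itself.  The
 * data blocks stay allocated; they are now owned by the extent tree.
 */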
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

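/*
 * Free all meta-data blocks hanging off a triple indirect block,
 * and then the triple indirect block itself.
 */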
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

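/*
 * Free the original inode's indirect, double indirect and triple
 * indirect meta-data blocks after the data they mapped has been
 * taken over by the extent tree.
 */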
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

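/*
 * Transplant the extent tree built in tmp_inode into the original
 * inode: copy i_data across, set the extents flag and fix up
 * i_blocks, then free the now unused indirect meta-data blocks.
 * Fails with -EAGAIN if a block allocation raced with the migration.
 */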
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32	i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE is cleared, a block allocation
	 * happened after we started the migration and we need to
	 * fail the migration.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not update the
	 * original inode i_blocks for those blocks via quota
	 * APIs; the quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty afterwards, because we decrement
	 * i_blocks when freeing the indirect meta-data blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

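/*
 * Recursively free the index block referenced by @ix together with
 * all index blocks below it in the extent tree.
 */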
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;
	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

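/*
 * Migrate an indirect-mapped inode to the extent format: build an
 * extent tree in a temporary inode by walking the old block map,
 * then swap the temporary inode's i_data into the original inode
 * and free the old indirect meta-data blocks.
 */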
int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * Don't migrate fast symlinks.
		 */
		return retval;

	/*
	 * Worst case we can touch the allocation bitmaps, a bgd
	 * block, and a block to link in the orphan list.  We do
	 * need to worry about credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		return retval;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
				   S_IFREG, NULL, goal, owner);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		return retval;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set i_nlink to zero so the inode will be deleted later,
	 * when we drop our reference to it.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * Start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we have already committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail the migration.  New block allocation
	 * will clear the EXT4_STATE_EXT_MIGRATE flag; the flag is
	 * updated with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just roll back the in-core changes and
		 * leave other work to orphan_list_cleanup().
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32-bit block addresses, 4 bytes each */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * On failure, delete the extent information built in
		 * the tmp_inode.
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap the inode data, free the
			 * extent details of the tmp_inode.
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as having size zero.
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * Set the i_blocks count to zero so that
	 * ext4_evict_inode() does the right job.
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);

	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header	*eh;
	struct ext4_super_block		*es = EXT4_SB(inode->i_sb)->s_es;
	struct ext4_inode_info		*ei = EXT4_I(inode);
	struct ext4_extent		*ex;
	unsigned int			i, len;
	ext4_lblk_t			start, end;
	ext4_fsblk_t			blk;
	handle_t			*handle;
	int				ret;

	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (ext4_has_feature_bigalloc(inode->i_sb))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated, otherwise delayed allocation blocks may not
	 * be reflected and bypass the checks on extent header.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex  = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}