/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of the contiguous blocks which can be
 * represented by a single extent
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

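/*
 * Flush the contiguous block range accumulated in @lb into the inode's
 * extent tree as a single extent, extending or restarting the journal
 * beforehand so the handle has enough credits for the insert.
 */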
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;
	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0);

	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credit needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credit. But below we try not to accumulate too much
	 * of it by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credit we accumulated is not really high
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		retval = ext4_journal_restart(handle, needed);
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If we are not able to extend the journal,
			 * restart it
			 */
			retval = ext4_journal_restart(handle, needed);
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
err_out:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	lb->first_pblock = 0;
	return retval;
}

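/*
 * Account the block at @pblock for logical block lb->curr_block: extend
 * the contiguous range tracked in @lb if possible, otherwise flush that
 * range via finish_range() and start a new one.
 */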
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

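/*
 * Walk an indirect block and feed every mapped block to
 * update_extent_range(); holes only advance the logical block counter.
 */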
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

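/*
 * Walk a double indirect block, descending into each indirect block it
 * references; a hole skips a whole indirect block's worth of logical
 * blocks.
 */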
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

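/*
 * Walk a triple indirect block, descending into each double indirect
 * block it references.
 */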
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

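/*
 * Make sure the handle has enough credits to free a block, extending
 * or restarting the transaction if it does not.
 */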
static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing blocks. During this we touch the
	 * superblock, group descriptor and block bitmap,
	 * so allocate a credit of 3. We may also update
	 * quota (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}

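/*
 * Free the indirect blocks listed in a double indirect block, then the
 * double indirect block itself. Only the metadata blocks are freed; the
 * data blocks they used to map now belong to the extent tree.
 */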
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

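/*
 * Free the metadata below a triple indirect block: each double indirect
 * block (via free_dind_blocks()) and finally the triple indirect block
 * itself.
 */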
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

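/*
 * Free the indirect, double indirect and triple indirect metadata
 * blocks recorded in the i_data[] snapshot taken from the original
 * inode.
 */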
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				 le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

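/*
 * Copy the extent-mapped i_data built in tmp_inode over the original
 * inode's i_data and free the old indirect metadata blocks. Fails with
 * -EAGAIN if a block allocation raced with the migration.
 */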
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32	i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE is cleared, a block allocation
	 * happened after we started the migrate and we need to
	 * fail the migrate.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty afterwards, because we decrement the
	 * i_blocks when freeing the indirect meta-data blocks
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

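/*
 * Recursively free the extent tree index block referenced by @ix and
 * any index blocks below it.
 */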
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;
	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

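/*
 * Migrate an indirect-mapped inode to extents: build the extent tree in
 * a temporary inode, swap the result into the original inode and free
 * the old indirect metadata blocks.
 */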
int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	/*
	 * Worst case we can touch the allocation bitmaps, a bgd
	 * block, and a block to link in the orphan list.  We do need
	 * to worry about credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		return retval;
	}
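	/* Goal: first inode number of the original inode's block group */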
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				   S_IFREG, NULL, goal, owner);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		return retval;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop the inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail migrate.  New block allocation will
	 * clear EXT4_STATE_EXT_MIGRATE flag.  The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just roll back in-core changes and leave
		 * the rest of the work to orphan_list_cleanup()
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32-bit block addresses, 4 bytes each */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * On failure, delete the extent information built in
		 * the tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap inode data, free the extent
			 * details of the tmp inode
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * set the i_blocks count to zero
	 * so that ext4_delete_inode does the
	 * right job
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);

	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header	*eh;
	struct ext4_super_block		*es = EXT4_SB(inode->i_sb)->s_es;
	struct ext4_inode_info		*ei = EXT4_I(inode);
	struct ext4_extent		*ex;
	unsigned int			i, len;
	ext4_fsblk_t			blk;
	handle_t			*handle;
	int				ret;

	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
		return -EOPNOTSUPP;

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex  = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		if (len > EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = 0; i < len; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}