/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inodes allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
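/*
 * For illustration (hypothetical arguments, not tied to any real bitmap):
 * ext4_mark_bitmap_end(100, 160, bitmap) sets bits 100..103 one at a time
 * with ext4_set_bit(), since (100 + 7) & ~7UL == 104 is the next byte
 * boundary, and then memsets bytes 13..19 (bits 104..159) to 0xff at once.
 */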
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/* Initializes an uninitialized inode bitmap */
static unsigned ext4_init_inode_bitmap(struct super_block *sb,
				       struct buffer_head *bh,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad, mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}

void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}

	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit if bh is uptodate,
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	ext4_xattr_delete_inode(handle, inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
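	/*
	 * Worked example with hypothetical geometry: if
	 * EXT4_INODES_PER_GROUP(sb) is 8192, freeing inode 8200 gives
	 * block_group = 8199 / 8192 = 1 and bit = 8199 % 8192 = 7,
	 * i.e. the 8th bit of group 1's inode bitmap is cleared below.
	 */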
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t f = ext4_flex_group(sbi, block_group);

		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
		if (is_directory)
			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
		ext4_mark_super_dirty(sb);
	} else
		ext4_error(sb, "bit already cleared for inode %lu", ino);

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

struct orlov_stats {
	__u32 free_inodes;
	__u32 free_clusters;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For other directories, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free clusters left (min_clusters).
 *
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */

static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freeb, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}
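	/*
	 * E.g. (hypothetical numbers): with 100 real block groups and
	 * flex_size = 16 (s_log_groups_per_flex = 4), ngroups becomes
	 * (100 + 15) >> 4 = 7 flex groups, and a parent in block group 37
	 * maps to flex group 37 >> 4 = 2.
	 */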

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	avefreec = freeb;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		grp_free = ext4_free_inodes_count(sb, desc);
		if (desc && grp_free && grp_free >= avefreei) {
			*group = grp;
			return 0;
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that we use that
	 * flex group for future allocations.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for  (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
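	/*
	 * The probe offsets accumulate as 1, 3, 7, 15, ..., so for a
	 * hypothetical starting group g the candidates tried are g+1, g+3,
	 * g+7, g+15, ... (mod ngroups), roughly log2(ngroups) probes.
	 */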
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
			     const struct qstr *qstr, __u32 goal, uid_t *owner)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);
	sbi = EXT4_SB(sb);

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);
		if (ino >= EXT4_INODES_PER_GROUP(sb)) {
			if (++group == ngroups)
				group = 0;
			continue;
		}
		if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			continue;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */
		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
	}
	err = -ENOSPC;
	goto out;

got:
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
		brelse(block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		ext4_unlock_group(sb, group);

		if (err)
			goto fail;
	}

	BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
	if (err)
		goto fail;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err)
		goto fail;

	/* Update the relevant bg descriptor fields */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		int free;
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. if it is greater
		 * we need to update the bg_itable_unused count
		 */
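		/*
		 * Example (hypothetical numbers): with 8192 inodes per group
		 * and bg_itable_unused = 8000, free = 192; allocating relative
		 * inode ino = 193 (> free) lowers bg_itable_unused to
		 * 8192 - 193 = 7999.
		 */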
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		up_read(&grp->alloc_sem);
	}
	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
		ext4_unlock_group(sb, group);
	}

	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err)
		goto fail;

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	ext4_mark_super_dirty(sb);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}
	if (owner) {
		inode->i_mode = mode;
		inode->i_uid = owner[0];
		inode->i_gid = owner[1];
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
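	/*
	 * E.g. (hypothetical): relative inode 8 in group 1 with 8192 inodes
	 * per group becomes i_ino = 8 + 1 * 8192 = 8200, the inverse of the
	 * (ino - 1) mapping used when an inode is freed.
	 */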
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		goto fail;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	/* Precompute checksum seed for inode metadata */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
		__u32 csum;
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = cpu_to_le32(inode->i_generation);
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	dquot_initialize(inode);
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir, qstr);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}

/*
 * Zeroes the not-yet-zeroed part of an inode table: just write zeroes
 * through the whole inode table. Must be called without any spinlock held.
 * The only place it is called from on an active filesystem is the
 * ext4lazyinit thread, so we do not need any special locks; however, we
 * have to prevent inode allocation from the current group, so we take the
 * alloc_sem lock to block ext4_new_inode() until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
				 int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;

	/* This should not happen, but just to be sure check this */
	if (sb->s_flags & MS_RDONLY) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If inode bitmap was already initialized there may be some
	 * used inodes so we need to skip blocks with used inodes in
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);
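	/*
	 * Hypothetical example: with 8192 inodes per group, 16 inodes per
	 * block and bg_itable_unused = 8000, 192 inodes are in use, so
	 * used_blks = DIV_ROUND_UP(192, 16) = 12 itable blocks are skipped.
	 */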

	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
		ext4_error(sb, "Something is wrong with group %u: "
			   "used itable blocks: %d; "
			   "itable unused count: %u",
			   group, used_blks,
			   ext4_itable_unused_count(sb, gdp));
		ret = 1;
		goto err_out;
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full. But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * further zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}