/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */
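
/*
 * Illustrative sketch (added commentary, not from the original source) of a
 * single block group's on-disk layout as described above, ignoring flex_bg
 * remapping:
 *
 *	[ block bitmap | inode bitmap | inode table (N blocks) | data blocks ]
 */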

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
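
/*
 * Illustrative example (added commentary): with start_bit = 12 and
 * end_bit = 32, the loop above sets bits 12..15 one at a time to finish
 * the partial byte, and the memset() then fills bytes 2 and 3 (bits
 * 16..31) with 0xff in a single call.
 */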

/* Initializes an uninitialized inode bitmap */
static unsigned ext4_init_inode_bitmap(struct super_block *sb,
				       struct buffer_head *bh,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			bh->b_data);
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);

	return EXT4_INODES_PER_GROUP(sb);
}
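
/*
 * Illustration (added commentary): with a 4K block size the bitmap block
 * holds 32768 bits, so for a group with 8192 inodes the
 * ext4_mark_bitmap_end() call above sets bits 8192..32767, padding the
 * unused tail of the block with ones.
 */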

void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}

	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

verify:
	ext4_lock_group(sb, block_group);
	if (!buffer_verified(bh) &&
	    !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8)) {
		ext4_unlock_group(sb, block_group);
		put_bh(bh);
		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
			   "inode_bitmap = %llu", block_group, bitmap_blk);
		return NULL;
	}
	ext4_unlock_group(sb, block_group);
	set_buffer_verified(bh);
	return bh;
}
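
/*
 * Sketch of the typical caller pattern (added commentary; the error value
 * is the caller's choice).  The returned buffer_head carries a reference
 * that must be dropped with brelse():
 *
 *	bitmap_bh = ext4_read_inode_bitmap(sb, group);
 *	if (!bitmap_bh)
 *		return -EIO;
 *	... inspect or modify bitmap_bh->b_data under ext4_lock_group() ...
 *	brelse(bitmap_bh);
 */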

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	ext4_xattr_delete_inode(handle, inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t f = ext4_flex_group(sbi, block_group);

		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
		if (is_directory)
			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
		ext4_mark_super_dirty(sb);
	} else
		ext4_error(sb, "bit already cleared for inode %lu", ino);

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}
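
/*
 * Worked example for the (group, bit) mapping above (illustrative numbers,
 * not from the source): with EXT4_INODES_PER_GROUP(sb) = 8192, inode 12000
 * lives in block_group (12000 - 1) / 8192 = 1 at bit (12000 - 1) % 8192 =
 * 3807 of that group's inode bitmap.
 */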

struct orlov_stats {
	__u32 free_inodes;
	__u32 free_clusters;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining directories, the rules are:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free clusters left (min_clusters).
 *
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */

static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freeb, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	avefreec = freeb;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc) {
			/* check desc before dereferencing it for the count */
			grp_free = ext4_free_inodes_count(sb, desc);
			if (grp_free && grp_free >= avefreei) {
				*group = grp;
				return 0;
			}
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}
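
/*
 * Worked example (illustrative numbers, not from the source): with
 * inodes_per_group = 8192, flex_size = 16 and 200000 free inodes spread
 * over 64 flex groups, avefreei = 3125 while inodes_per_group * flex_size
 * / 4 = 32768, so min_inodes would go negative and is clamped to 1; the
 * min_inodes test then only rejects completely full flex groups.
 */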

static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that future
	 * allocations use that flex group.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for  (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}
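
/*
 * Illustrative trace of the quadratic hash above (added commentary):
 * starting from s = (parent_group + i_ino) % ngroups, the loop probes
 * s+1, s+3, s+7, s+15, ... (mod ngroups), i.e. cumulative power-of-two
 * strides, which cheaply scatters allocations away from crowded groups.
 */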

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
			     const struct qstr *qstr, __u32 goal, uid_t *owner)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);
	sbi = EXT4_SB(sb);

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);
		if (ino >= EXT4_INODES_PER_GROUP(sb)) {
			if (++group == ngroups)
				group = 0;
			continue;
		}
		if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			continue;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */
		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
	}
	err = -ENOSPC;
	goto out;

got:
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
		brelse(block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh,
						   EXT4_BLOCKS_PER_GROUP(sb) /
						   8);
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		ext4_unlock_group(sb, group);

		if (err)
			goto fail;
	}

	BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
	if (err)
		goto fail;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err)
		goto fail;

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater,
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		up_read(&grp->alloc_sem);
	}
	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
		ext4_unlock_group(sb, group);
	}

	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err)
		goto fail;

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	ext4_mark_super_dirty(sb);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}
	if (owner) {
		inode->i_mode = mode;
		inode->i_uid = owner[0];
		inode->i_gid = owner[1];
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		goto fail;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	/* Precompute checksum seed for inode metadata */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
		__u32 csum;
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = cpu_to_le32(inode->i_generation);
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}
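	/*
	 * Added note (assumption: ext4_chksum() is crc32c here, as with the
	 * metadata_csum feature): the per-inode seed is effectively
	 * crc32c(crc32c(fs_seed, inode_number), generation).
	 */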

	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	dquot_initialize(inode);
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir, qstr);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink*/
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}
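
/*
 * Sketch of a typical caller (added commentary; the surrounding journal
 * calls follow the callers in fs/ext4/namei.c, but this exact snippet is
 * not from the source):
 *
 *	handle = ext4_journal_start(dir, credits);
 *	inode = ext4_new_inode(handle, dir, mode, &dentry->d_name, 0, NULL);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	... write initial contents, then ext4_journal_stop(handle) ...
 */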

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}

/*
 * Zeroes the not yet zeroed inode table - just writes zeroes through the
 * whole inode table. Must be called without any spinlock held. The only
 * place it is called from on an active filesystem is the ext4lazyinit
 * thread, so we do not need any special locks; however, we have to prevent
 * inode allocation from the current group, so we take the alloc_sem lock to
 * block ext4_new_inode() until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
				 int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;

	/* This should not happen, but just to be sure check this */
	if (sb->s_flags & MS_RDONLY) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If inode bitmap was already initialized there may be some
	 * used inodes so we need to skip blocks with used inodes in
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);

	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
		ext4_error(sb, "Something is wrong with group %u: "
			   "used itable blocks: %d; "
			   "itable unused count: %u",
			   group, used_blks,
			   ext4_itable_unused_count(sb, gdp));
		ret = 1;
		goto err_out;
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full. But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * further zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}
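
/*
 * Worked example for the used_blks computation above (illustrative numbers,
 * not from the source): with 4K blocks and 256-byte inodes there are 16
 * inodes per itable block; if a group has 8192 inodes and bg_itable_unused
 * is 8000, up to 192 inodes may be in use, so used_blks =
 * DIV_ROUND_UP(192, 16) = 12 and zeroing starts 12 blocks into the
 * 512-block inode table.
 */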