/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table, and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
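	/* Set bits one at a time until we reach a byte boundary; the
	 * remainder of the range is whole bytes that memset() can fill. */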
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
				ext4_group_t block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If the checksum is bad, mark all blocks and inodes used to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_blks_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
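	/* Mark the unused tail of the bitmap, past the last real inode,
	 * as in use so it can never be allocated. */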
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
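	/* Re-check under the buffer lock: another task may have brought
	 * the bitmap uptodate while we waited for the lock. */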
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for read. We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * gets set with the buffer lock held.
	 */
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;

	if (atomic_read(&inode->i_count) > 1) {
		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk(KERN_ERR "ext4_free_inode: inode on "
		       "nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	ext4_xattr_delete_inode(handle, inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
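	/* Clear the inode's bit while holding the group lock; "cleared"
	 * is zero if the bit was already clear (a double free), which is
	 * reported below. */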
	ext4_lock_group(sb, block_group);
	cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t f = ext4_flex_group(sbi, block_group);

		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
		if (is_directory)
			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
		sb->s_dirt = 1;
	} else
		ext4_error(sb, "bit already cleared for inode %lu", ino);

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
				ext4_group_t *best_group)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group;
	int ret = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !ext4_free_inodes_count(sb, desc))
			continue;
		if (ext4_free_inodes_count(sb, desc) < avefreei)
			continue;
		if (!best_desc ||
		    (ext4_free_blks_count(sb, desc) >
		     ext4_free_blks_count(sb, best_desc))) {
			*best_group = group;
			best_desc = desc;
			ret = 0;
		}
	}
	return ret;
}

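/* A flex group is a candidate only if more than this percentage of its
 * blocks are free. */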
#define free_block_ratio 10

static int find_group_flex(struct super_block *sb, struct inode *parent,
			   ext4_group_t *best_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = sbi->s_flex_groups;
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	int flex_size = ext4_flex_bg_size(sbi);
	ext4_group_t best_flex = parent_fbg_group;
	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
	int flexbg_free_blocks;
	int flex_freeb_ratio;
	ext4_group_t n_fbg_groups;
	ext4_group_t i;

	n_fbg_groups = (ngroups + flex_size - 1) >>
		sbi->s_log_groups_per_flex;

find_close_to_parent:
	flexbg_free_blocks = atomic_read(&flex_group[best_flex].free_blocks);
	flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
	if (atomic_read(&flex_group[best_flex].free_inodes) &&
	    flex_freeb_ratio > free_block_ratio)
		goto found_flexbg;

	if (best_flex && best_flex == parent_fbg_group) {
		best_flex--;
		goto find_close_to_parent;
	}

	for (i = 0; i < n_fbg_groups; i++) {
		if (i == parent_fbg_group || i == parent_fbg_group - 1)
			continue;

		flexbg_free_blocks = atomic_read(&flex_group[i].free_blocks);
		flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

		if (flex_freeb_ratio > free_block_ratio &&
		    (atomic_read(&flex_group[i].free_inodes))) {
			best_flex = i;
			goto found_flexbg;
		}

		if ((atomic_read(&flex_group[best_flex].free_inodes) == 0) ||
		    ((atomic_read(&flex_group[i].free_blocks) >
		      atomic_read(&flex_group[best_flex].free_blocks)) &&
		     atomic_read(&flex_group[i].free_inodes)))
			best_flex = i;
	}

	if (!atomic_read(&flex_group[best_flex].free_inodes) ||
	    !atomic_read(&flex_group[best_flex].free_blocks))
		return -1;

found_flexbg:
	for (i = best_flex * flex_size; i < ngroups &&
		     i < (best_flex + 1) * flex_size; i++) {
		desc = ext4_get_group_desc(sb, i, NULL);
		if (ext4_free_inodes_count(sb, desc)) {
			*best_group = i;
			goto out;
		}
	}

	return -1;
out:
	return 0;
}

struct orlov_stats {
	__u32 free_inodes;
	__u32 free_blocks;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
void get_orlov_stats(struct super_block *sb, ext4_group_t g,
		       int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_blocks = atomic_read(&flex_group[g].free_blocks);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_blocks = ext4_free_blks_count(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_blocks = 0;
		stats->used_dirs = 0;
436 437 438
	}
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inodes and free blocks counts
 * not worse than average we return the one with the smallest directory
 * count.  Otherwise we simply return a random group.
 *
 * The remaining rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks).
 *
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */

static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, int mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

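		/* Spread first-level directories: derive the starting group
		 * from a hash of the name when one is available, otherwise
		 * pick it at random. */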
		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_blocks < avefreeb)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_blocks < min_blocks)
			continue;
		goto found_flex_bg;
	}

fallback:
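	/* Fall back to a linear scan over individual block groups for any
	 * group with at least the average number of free inodes. */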
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_inodes_count(sb, desc) >= avefreei) {
			*group = grp;
			return 0;
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, int mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that future
	 * allocations use that flex group.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for  (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
			ext4_free_blks_count(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
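		/* i doubles each pass (1, 2, 4, ...), so the probed group
		 * advances by cumulative offsets 1, 3, 7, 15, ... from the
		 * hashed starting point. */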
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
				ext4_free_blks_count(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}

/*
 * Claim the inode from the inode bitmap. If the group
 * is uninit we need to take the group's ext4_group_lock
 * and clear the uninit flag. The inode bitmap update
 * and the group desc uninit flag clear should be done
 * while holding ext4_group_lock so that ext4_read_inode_bitmap
 * doesn't race with ext4_claim_inode()
 */
static int ext4_claim_inode(struct super_block *sb,
			struct buffer_head *inode_bitmap_bh,
			unsigned long ino, ext4_group_t group, int mode)
{
	int free = 0, retval = 0, count;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);

	ext4_lock_group(sb, group);
	if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
		/* not a free inode */
		retval = 1;
		goto err_ret;
	}
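	/* Bit numbers are zero-based but inode numbers within a group
	 * start at 1, so convert before the range checks below. */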
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
			ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_unlock_group(sb, group);
		ext4_error(sb, "reserved inode or inode > inodes count - "
			   "block_group = %u, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		return 1;
	}
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {

		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */

			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				ext4_itable_unused_count(sb, gdp);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. If it is greater,
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
	}
	count = ext4_free_inodes_count(sb, gdp) - 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (S_ISDIR(mode)) {
		count = ext4_used_dirs_count(sb, gdp) + 1;
		ext4_used_dirs_set(sb, gdp, count);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
err_ret:
	ext4_unlock_group(sb, group);
	return retval;
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
			     const struct qstr *qstr, __u32 goal)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	int free = 0;
	static int once = 1;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);
	sbi = EXT4_SB(sb);

	if (!goal)
		goal = sbi->s_inode_goal;

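	/* An explicit goal directly selects the block group and the bit
	 * within that group's inode bitmap. */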
	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (sbi->s_log_groups_per_flex && test_opt(sb, OLDALLOC)) {
		ret2 = find_group_flex(sb, dir, &group);
		if (ret2 == -1) {
			ret2 = find_group_other(sb, dir, &group, mode);
			if (ret2 == 0 && once) {
				once = 0;
				printk(KERN_NOTICE "ext4: find_group_flex "
				       "failed, fallback succeeded dir %lu\n",
				       dir->i_ino);
			}
		}
		goto got_group;
	}

	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			ret2 = find_group_dir(sb, dir, &group);
		else
			ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	} else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

repeat_in_this_group:
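		/* Find the next candidate bit in this group's bitmap; if
		 * another task wins the race in ext4_claim_inode(), retry
		 * from the following bit. */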
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);

		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
							    inode_bitmap_bh);
			if (err)
				goto fail;

			BUFFER_TRACE(group_desc_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
								group_desc_bh);
			if (err)
				goto fail;
			if (!ext4_claim_inode(sb, inode_bitmap_bh,
						ino, group, mode)) {
				/* we won it */
				BUFFER_TRACE(inode_bitmap_bh,
					"call ext4_handle_dirty_metadata");
				err = ext4_handle_dirty_metadata(handle,
								 NULL,
							inode_bitmap_bh);
				if (err)
					goto fail;
				/* zero bit is inode number 1 */
				ino++;
				goto got;
			}
			/* we lost it */
			ext4_handle_release_buffer(handle, inode_bitmap_bh);
			ext4_handle_release_buffer(handle, group_desc_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in a concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == ngroups)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		free = 0;
		ext4_lock_group(sb, group);
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_blks_set(sb, gdp, free);
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		ext4_unlock_group(sb, group);

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
			err = ext4_handle_dirty_metadata(handle,
							NULL, block_bitmap_bh);
		}

		brelse(block_bitmap_bh);
		if (err)
			goto fail;
	}
	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}

	if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/*
	 * Don't inherit extent flag from directory, amongst others. We set
	 * extent flag on newly created directory and file only if -o extent
	 * mount option is specified
	 */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		err = -EINVAL;
		goto fail_drop;
	}
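	/* Give the inode a fresh generation number; it distinguishes
	 * reuses of the same inode number (e.g. in NFS file handles). */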
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state_flags = 0;
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	dquot_initialize(inode);
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
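	/* Debug builds cross-check each group descriptor's free-inode
	 * count against the on-disk inode bitmap. */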
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}