/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.
 */
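
/*
 * For example (a sketch, not tied to any particular filesystem): with
 * 8192 inodes per group, inode number 8193 lives in block group 1 at
 * bit 0 of that group's inode bitmap, following the mapping used
 * throughout this file:
 *
 *	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
 *	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
 */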

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
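
/*
 * Worked example (hypothetical arguments): ext4_mark_bitmap_end(10, 32,
 * bitmap) sets bits 10..15 individually with ext4_set_bit() to finish
 * the partial byte, then memset()s bytes 2 and 3 (bits 16..31) to 0xff
 * in a single call.
 */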

/* Initializes an uninitialized inode bitmap */
static unsigned ext4_init_inode_bitmap(struct super_block *sb,
				       struct buffer_head *bh,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes used to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
			bh->b_data);
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, gdp);

	return EXT4_INODES_PER_GROUP(sb);
}
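
/*
 * Completion callback for the inode bitmap read submitted in
 * ext4_read_inode_bitmap(): on success the buffer is marked both
 * uptodate and bitmap-uptodate before the buffer lock is dropped.
 */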
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;
	struct ext4_group_info *grp;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read inode bitmap - "
			    "block_group = %u, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}

	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit, and bh is uptodate, then the
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

verify:
	ext4_lock_group(sb, block_group);
	if (!buffer_verified(bh) &&
	    !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8)) {
		ext4_unlock_group(sb, block_group);
		put_bh(bh);
		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
			   "inode_bitmap = %llu", block_group, bitmap_blk);
		grp = ext4_get_group_info(sb, block_group);
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
		return NULL;
	}
	ext4_unlock_group(sb, block_group);
	set_buffer_verified(bh);
	return bh;
}
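
/*
 * A note on the three buffer states used above (a summary of existing
 * semantics, not new behaviour): buffer_uptodate() means the block's
 * contents are valid in memory, bitmap_uptodate() additionally means
 * those contents are valid *as a bitmap* (e.g. after in-memory init of
 * an INODE_UNINIT group), and buffer_verified() means the bitmap
 * checksum has been verified against the group descriptor.
 */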

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;
	struct ext4_group_info *grp;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	ext4_xattr_delete_inode(handle, inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	/* Don't bother if the inode bitmap is corrupt. */
	grp = ext4_get_group_info(sb, block_group);
	if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || !bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t f = ext4_flex_group(sbi, block_group);

		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
		if (is_directory)
			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
	} else {
		ext4_error(sb, "bit already cleared for inode %lu", ino);
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
	}

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

struct orlov_stats {
	__u64 free_clusters;
	__u32 free_inodes;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}
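
/*
 * Usage sketch (hypothetical numbers): on a filesystem built with a
 * flex_bg size of 16, get_orlov_stats(sb, 2, 16, &stats) fills @stats
 * from the in-memory s_flex_groups[2] counters, i.e. the aggregate
 * free inodes, free clusters and directory count of block groups
 * 32..47.  With flex_size == 1 it reads the single group descriptor
 * instead.
 */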

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining directories, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_clusters).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */
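
/*
 * Illustrative numbers only (not from any real filesystem): with 8192
 * inodes per group, flex_size == 1, 100 groups, 400000 free inodes and
 * 2000 directories, avefreei = 4000, so max_dirs = 2000/100 + 8192/16
 * = 532 and min_inodes = 4000 - 8192/4 = 1952.
 */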

static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freeb, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	avefreec = freeb;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == sb->s_root->d_inode) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc) {
			grp_free = ext4_free_inodes_count(sb, desc);
			if (grp_free && grp_free >= avefreei) {
				*group = grp;
				return 0;
			}
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that we use that
	 * flex group for future allocations.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}
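
/*
 * A note on the quadratic probe above (derived from the loop, not new
 * behaviour): starting from group g, the doubling step visits g+1,
 * g+3, g+7, g+15, ... since each offset i (1, 2, 4, 8, ...) is added
 * cumulatively to *group, modulo ngroups.
 */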

/*
 * In no journal mode, if an inode has recently been deleted, we want
 * to avoid reusing it until we're reasonably sure the inode table
 * block has been written back to disk.  (Yes, these values are
 * somewhat arbitrary...)
 */
#define RECENTCY_MIN	5
#define RECENTCY_DIRTY	30

static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
{
	struct ext4_group_desc	*gdp;
	struct ext4_inode	*raw_inode;
	struct buffer_head	*bh;
	unsigned long		dtime, now;
	int	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int	offset, ret = 0, recentcy = RECENTCY_MIN;

	gdp = ext4_get_group_desc(sb, group, NULL);
	if (unlikely(!gdp))
		return 0;

	bh = sb_getblk(sb, ext4_inode_table(sb, gdp) +
		       (ino / inodes_per_block));
	if (unlikely(!bh) || !buffer_uptodate(bh))
		/*
		 * If the block is not in the buffer cache, then it
		 * must have been written out.
		 */
		goto out;

	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
	raw_inode = (struct ext4_inode *) (bh->b_data + offset);
	dtime = le32_to_cpu(raw_inode->i_dtime);
	now = get_seconds();
	if (buffer_dirty(bh))
		recentcy += RECENTCY_DIRTY;

	if (dtime && (dtime < now) && (now < dtime + recentcy))
		ret = 1;
out:
	brelse(bh);
	return ret;
}
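
/*
 * For example (illustrative timing): with no journal, an inode whose
 * on-disk dtime is 3 seconds in the past is skipped (RECENTCY_MIN ==
 * 5), and if its inode table block is still dirty in the buffer cache
 * the window stretches to RECENTCY_MIN + RECENTCY_DIRTY == 35 seconds.
 */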

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
			       umode_t mode, const struct qstr *qstr,
			       __u32 goal, uid_t *owner, int handle_type,
			       unsigned int line_no, int nblocks)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;
	struct ext4_group_info *grp;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);
	sbi = EXT4_SB(sb);

	/*
	 * Initialize owners and quota early so that we don't have to account
	 * for quota initialization worst case in standard inode creating
	 * transaction
	 */
	if (owner) {
		inode->i_mode = mode;
		i_uid_write(inode, owner[0]);
		i_gid_write(inode, owner[1]);
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);
	dquot_initialize(inode);

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}
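
	/*
	 * Sketch of the goal fast path above (made-up numbers): on a
	 * filesystem with 8192 inodes per group, goal == 8193 resolves
	 * to group 1, bitmap offset 0, skipping the group-selection
	 * heuristics below.
	 */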

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto out;

		/*
		 * Check free inodes count before loading bitmap.
		 */
		if (ext4_free_inodes_count(sb, gdp) == 0) {
			if (++group == ngroups)
				group = 0;
			continue;
		}

		grp = ext4_get_group_info(sb, group);
		/* Skip groups with already-known suspicious inode tables */
		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
			if (++group == ngroups)
				group = 0;
			continue;
		}

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		/* Skip groups with suspicious inode tables */
		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) || !inode_bitmap_bh) {
			if (++group == ngroups)
				group = 0;
			continue;
		}

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);
		if (ino >= EXT4_INODES_PER_GROUP(sb))
			goto next_group;
		if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			continue;
		}
		if ((EXT4_SB(sb)->s_journal == NULL) &&
		    recently_deleted(sb, group, ino)) {
			ino++;
			goto next_inode;
		}
		if (!handle) {
			BUG_ON(nblocks <= 0);
			handle = __ext4_journal_start_sb(dir->i_sb, line_no,
							 handle_type, nblocks,
							 0);
			if (IS_ERR(handle)) {
				err = PTR_ERR(handle);
				ext4_std_error(sb, err);
				goto out;
			}
		}
		BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */
next_inode:
		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
next_group:
		if (++group == ngroups)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			ext4_std_error(sb, err);
			goto out;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh);
			ext4_group_desc_csum_set(sb, group, gdp);
		}
		ext4_unlock_group(sb, group);
		brelse(block_bitmap_bh);

		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
	}

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. if it is greater
		 * we need to update the bg_itable_unused count
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		up_read(&grp->alloc_sem);
	} else {
		ext4_lock_group(sb, group);
	}

	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		ext4_group_desc_csum_set(sb, group, gdp);
	}
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
			   inode->i_ino);
		goto out;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	/* Precompute checksum seed for inode metadata */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = cpu_to_le32(inode->i_generation);
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ei->i_inline_off = 0;
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);

	ret = inode;
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir, qstr);
	if (err)
		goto fail_free_drop;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);
fail_drop:
	clear_nlink(inode);
	unlock_new_inode(inode);
out:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}
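
/*
 * Callers normally reach __ext4_new_inode() through wrapper macros in
 * ext4.h that supply line_no/handle_type/nblocks; when called with
 * handle == NULL, the function starts its own journal handle lazily,
 * only after a candidate bit has been found in the inode bitmap.
 */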

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_WARNING "inode=%p\n", inode);
	if (inode) {
		printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_WARNING "max_ino=%lu\n", max_ino);
		printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}

/*
 * Zeroes the not yet zeroed inode table - just writes zeroes through the
 * whole inode table. Must be called without any spinlock held. The only
 * place where it is called from on an active part of the filesystem is the
 * ext4lazyinit thread, so we do not need any special locks; however, we
 * have to prevent inode allocation from the current group, so we take the
 * alloc_sem lock to block ext4_new_inode() until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
				 int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;

	/* This should not happen, but just to be sure check this */
	if (sb->s_flags & MS_RDONLY) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If inode bitmap was already initialized there may be some
	 * used inodes so we need to skip blocks with used inodes in
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);
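
	/*
	 * Example of the computation above (hypothetical geometry): with
	 * 8192 inodes per group, 32 inodes per block and
	 * bg_itable_unused == 8000, used_blks = DIV_ROUND_UP(192, 32) =
	 * 6, so zeroing starts six blocks into the inode table.
	 */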

	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
		ext4_error(sb, "Something is wrong with group %u: "
			   "used itable blocks: %d; "
			   "itable unused count: %u",
			   group, used_blks,
			   ext4_itable_unused_count(sb, gdp));
		ret = 1;
		goto err_out;
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full. But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * further zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	ext4_group_desc_csum_set(sb, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}