/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "group.h"

/*
 * ialloc.c contains the inodes allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
				ext4_group_t block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If the checksum is bad, mark all blocks and inodes used to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __func__, "Checksum bad for group %lu\n",
			   block_group);
		gdp->bg_free_blocks_count = 0;
		gdp->bg_free_inodes_count = 0;
		gdp->bg_itable_unused = 0;
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
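	/* Pad the unused tail of the bitmap block with 1s so those bits can
	 * never be handed out as free inodes. */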
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, __func__,
			    "Cannot read inode bitmap - "
			    "block_group = %lu, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	if (buffer_uptodate(bh) &&
	    !(desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		return bh;

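	/* The bitmap is (or may be) uninitialized; initialize it under the
	 * buffer and group locks, rechecking the flag in case another CPU
	 * got there first. */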
	lock_buffer(bh);
	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
		return bh;
	}
	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, __func__,
			    "Cannot read inode bitmap - "
			    "block_group = %lu, inode_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err;
	ext4_group_t flex_group;

	if (atomic_read(&inode->i_count) > 1) {
		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk(KERN_ERR "ext4_free_inode: inode on "
		       "nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_INIT(inode);
	ext4_xattr_delete_inode(handle, inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "ext4_free_inode",
			   "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
					bit, bitmap_bh->b_data))
		ext4_error(sb, "ext4_free_inode",
			   "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc(sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal) goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
			if (is_directory)
				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

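			/* Keep the per-flexgroup free inode count in sync
			 * with the group descriptor. */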
			if (sbi->s_log_groups_per_flex) {
				flex_group = ext4_flex_group(sbi, block_group);
				spin_lock(sb_bgl_lock(sbi, flex_group));
				sbi->s_flex_groups[flex_group].free_inodes++;
				spin_unlock(sb_bgl_lock(sbi, flex_group));
			}
		}
		BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh2);
		if (!fatal) fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
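/* Legacy directory placement, used with the "oldalloc" mount option: pick a
 * group with above-average free inodes and the most free blocks. */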
static int find_group_dir(struct super_block *sb, struct inode *parent,
				ext4_group_t *best_group)
{
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group;
	int ret = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
			continue;
		if (!best_desc ||
		    (le16_to_cpu(desc->bg_free_blocks_count) >
		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
			*best_group = group;
			best_desc = desc;
			ret = 0;
		}
	}
	return ret;
}

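/* For flex_bg filesystems: prefer a flex group close to the parent's that
 * still has free inodes and more than free_block_ratio percent of its
 * blocks free. */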
#define free_block_ratio 10

static int find_group_flex(struct super_block *sb, struct inode *parent,
			   ext4_group_t *best_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	struct flex_groups *flex_group = sbi->s_flex_groups;
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
	ext4_group_t ngroups = sbi->s_groups_count;
	int flex_size = ext4_flex_bg_size(sbi);
	ext4_group_t best_flex = parent_fbg_group;
	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
	int flexbg_free_blocks;
	int flex_freeb_ratio;
	ext4_group_t n_fbg_groups;
	ext4_group_t i;

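	/* Number of flex groups, rounded up so a trailing partial flex
	 * group still counts. */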
	n_fbg_groups = (sbi->s_groups_count + flex_size - 1) >>
		sbi->s_log_groups_per_flex;

find_close_to_parent:
	flexbg_free_blocks = flex_group[best_flex].free_blocks;
	flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
	if (flex_group[best_flex].free_inodes &&
	    flex_freeb_ratio > free_block_ratio)
		goto found_flexbg;

	if (best_flex && best_flex == parent_fbg_group) {
		best_flex--;
		goto find_close_to_parent;
	}

	for (i = 0; i < n_fbg_groups; i++) {
		if (i == parent_fbg_group || i == parent_fbg_group - 1)
			continue;

		flexbg_free_blocks = flex_group[i].free_blocks;
		flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

		if (flex_freeb_ratio > free_block_ratio &&
		    flex_group[i].free_inodes) {
			best_flex = i;
			goto found_flexbg;
		}

		if (flex_group[best_flex].free_inodes == 0 ||
		    (flex_group[i].free_blocks >
		     flex_group[best_flex].free_blocks &&
		     flex_group[i].free_inodes))
			best_flex = i;
	}

	if (!flex_group[best_flex].free_inodes ||
	    !flex_group[best_flex].free_blocks)
		return -1;

found_flexbg:
	for (i = best_flex * flex_size; i < ngroups &&
		     i < (best_flex + 1) * flex_size; i++) {
		desc = ext4_get_group_desc(sb, i, &bh);
		if (le16_to_cpu(desc->bg_free_inodes_count)) {
			*best_group = i;
			goto out;
		}
	}

	return -1;
out:
	return 0;
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the rest, the rules are as follows:
 *
 * It's OK to put directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it's already running too large debt (max_debt).
 * Parent's group is preferred, if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

#define INODE_COST 64
#define BLOCK_COST 256

static int find_group_orlov(struct super_block *sb, struct inode *parent,
				ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	ext4_fsblk_t blocks_per_dir;
	unsigned int ndirs;
	int max_debt, max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	ext4_group_t i;
	struct ext4_group_desc *desc;

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if ((parent == sb->s_root->d_inode) ||
	    (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
		int best_ndir = inodes_per_group;
		ext4_group_t grp;
		int ret = -1;

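		/* Spread top-level directories: start the scan at a random
		 * group rather than at the parent's. */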
		get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			grp = (parent_group + i) % ngroups;
			desc = ext4_get_group_desc(sb, grp, NULL);
			if (!desc || !desc->bg_free_inodes_count)
				continue;
			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
				continue;
			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
				continue;
			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
				continue;
			*group = grp;
			ret = 0;
			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
		}
		if (ret == 0)
			return ret;
		goto fallback;
	}

	blocks_per_dir = ext4_blocks_count(es) - freeb;
	do_div(blocks_per_dir, ndirs);

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group / 4;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

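	/* Debt budget: scale by the average number of blocks a directory
	 * consumes, then clamp to the 0..255 range the debt counter uses. */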
	max_debt = EXT4_BLOCKS_PER_GROUP(sb);
	max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt == 0)
		max_debt = 1;

	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
			continue;
		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
			continue;
		return 0;
	}

fallback:
	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && desc->bg_free_inodes_count &&
			le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
			return 0;
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
				ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *desc;
	ext4_group_t i;

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
			le16_to_cpu(desc->bg_free_blocks_count))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
				le16_to_cpu(desc->bg_free_blocks_count))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
			return 0;
	}

	return -1;
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_super_block *es;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	int free = 0;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	sbi = EXT4_SB(sb);
	es = sbi->s_es;

	if (sbi->s_log_groups_per_flex) {
		ret2 = find_group_flex(sb, dir, &group);
		goto got_group;
	}

	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			ret2 = find_group_dir(sb, dir, &group);
		else
			ret2 = find_group_orlov(sb, dir, &group);
	} else
		ret2 = find_group_other(sb, dir, &group);

got_group:
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
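		/* Find a candidate bit, then try to claim it with an atomic
		 * test-and-set; if another CPU wins the race, retry from the
		 * next bit. */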
		ino = ext4_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
						ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext4_journal_dirty_metadata");
				err = ext4_journal_dirty_metadata(handle,
								bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			jbd2_journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
	    ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_error(sb, __func__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %lu, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh2);
	if (err) goto fail;

	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bh = ext4_read_block_bitmap(sb, group);

		BUFFER_TRACE(block_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bh);
		if (err) {
			brelse(block_bh);
			goto fail;
		}

		free = 0;
		spin_lock(sb_bgl_lock(sbi, group));
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_free_blocks_count = cpu_to_le16(free);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bh, "dirty block bitmap");
			err = ext4_journal_dirty_metadata(handle, block_bh);
		}

		brelse(block_bh);
		if (err)
			goto fail;
	}

	spin_lock(sb_bgl_lock(sbi, group));
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);

			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */

			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				le16_to_cpu(gdp->bg_itable_unused);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater,
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			gdp->bg_itable_unused =
				cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
	}

	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
	if (S_ISDIR(mode)) {
		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		spin_lock(sb_bgl_lock(sbi, flex_group));
		sbi->s_flex_groups[flex_group].free_inodes--;
		spin_unlock(sb_bgl_lock(sbi, flex_group));
	}

	inode->i_uid = current->fsuid;
	if (test_opt(sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/*
	 * Don't inherit extent flag from directory. We set extent flag on
	 * newly created directory and file only if -o extent mount option is
	 * specified
	 */
	ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT4_DIRSYNC_FL;
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
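	/* A fresh generation number distinguishes this inode from earlier
	 * users of the same inode number (e.g. in NFS file handles). */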
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	if (DQUOT_ALLOC_INODE(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	if (test_opt(sb, EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
			ext4_ext_tree_init(handle, inode);
		}
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	DQUOT_FREE_INODE(inode);

fail_drop:
	DQUOT_DROP(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __func__,
			     "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __func__,
			     "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, __func__,
		     "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
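	/* With EXT4FS_DEBUG defined, cross-check the descriptor counts
	 * against the on-disk inode bitmaps. */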
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i;

	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += le16_to_cpu(gdp->bg_used_dirs_count);
	}
	return count;
}