// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/cred.h>

#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free block count of the group.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
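	/*
	 * (start_bit + 7) & ~7UL rounds start_bit up to the next byte
	 * boundary: the atomic ext4_set_bit() calls below only touch the
	 * leading partial byte, and memset() covers the remaining whole
	 * bytes.
	 */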
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

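/*
 * Completion callback for the bitmap read submitted in
 * ext4_read_inode_bitmap(); runs when the buffer I/O finishes.
 */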
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

static int ext4_validate_inode_bitmap(struct super_block *sb,
				      struct ext4_group_desc *desc,
				      ext4_group_t block_group,
				      struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);

	if (buffer_verified(bh))
		return 0;
	if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
		return -EFSCORRUPTED;

	ext4_lock_group(sb, block_group);
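	/*
	 * Re-check under the group lock: another task may have verified
	 * the bitmap while we were waiting for the lock.
	 */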
	if (buffer_verified(bh))
		goto verified;
	blk = ext4_inode_bitmap(sb, desc);
	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
			   "inode_bitmap = %llu", block_group, blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return -EFSBADCRC;
	}
	set_buffer_verified(bh);
verified:
	ext4_unlock_group(sb, block_group);
	return 0;
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return the buffer_head of the bitmap on success, or an ERR_PTR on error.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;
	int err;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return ERR_PTR(-EFSCORRUPTED);

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
		ext4_error(sb, "Invalid inode bitmap blk %llu in "
			   "block_group %u", bitmap_blk, block_group);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return ERR_PTR(-EFSCORRUPTED);
	}
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_warning(sb, "Cannot read inode bitmap - "
			     "block_group = %u, inode_bitmap = %llu",
			     block_group, bitmap_blk);
		return ERR_PTR(-ENOMEM);
	}
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}

	ext4_lock_group(sb, block_group);
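	/*
	 * If the group's inode bitmap is flagged uninitialized, nothing
	 * has ever been written to it on disk: construct it in memory
	 * instead (all inodes free, the unused tail padded with set bits).
	 */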
	if (ext4_has_group_desc_csum(sb) &&
	    (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
		if (block_group == 0) {
			ext4_unlock_group(sb, block_group);
			unlock_buffer(bh);
			ext4_error(sb, "Inode bitmap for bg 0 marked "
				   "uninitialized");
			err = -EFSCORRUPTED;
			goto out;
		}
		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate,
		 * the bitmap is uptodate as well.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error(sb, "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
				EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return ERR_PTR(-EIO);
	}

verify:
	err = ext4_validate_inode_bitmap(sb, desc, block_group, bh);
	if (err)
		goto out;
	return bh;
out:
	put_bh(bh);
	return ERR_PTR(err);
}

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;
	struct ext4_group_info *grp;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	dquot_initialize(inode);
	dquot_free_inode(inode);
	dquot_drop(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = sbi->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	/* Don't bother if the inode bitmap is corrupt. */
	grp = ext4_get_group_info(sb, block_group);
	if (IS_ERR(bitmap_bh)) {
		fatal = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}
	if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
		fatal = -EFSCORRUPTED;
		goto error_return;
	}

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
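	/*
	 * Clear the bit and update the descriptor counters under the
	 * group lock; "cleared" decides below whether the bitmap buffer
	 * must be dirtied at all.
	 */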
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		percpu_counter_dec(&sbi->s_dirs_counter);
	}
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t f = ext4_flex_group(sbi, block_group);

		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
		if (is_directory)
			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
	} else {
		ext4_error(sb, "bit already cleared for inode %lu", ino);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
	}

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

struct orlov_stats {
	__u64 free_clusters;
	__u32 free_inodes;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;

	if (flex_size > 1) {
		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return one with smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining directories the rules are:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_clusters).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */

static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freeb, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = EXT4_C2B(sbi,
		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	avefreec = freeb;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == d_inode(sb->s_root)) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			grp = prandom_u32();
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc) {
			grp_free = ext4_free_inodes_count(sb, desc);
			if (grp_free && grp_free >= avefreei) {
				*group = grp;
				return 0;
			}
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that future
	 * allocations use that flex group.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for  (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some
	 * free blocks: starting from that group g, this probes g+1, g+3,
	 * g+7, ... (mod ngroups).
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}

/*
 * In no journal mode, if an inode has recently been deleted, we want
 * to avoid reusing it until we're reasonably sure the inode table
 * block has been written back to disk.  (Yes, these values are
 * somewhat arbitrary...)
 */
#define RECENTCY_MIN	5
#define RECENTCY_DIRTY	300
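
/*
 * Both values are in seconds: an inode whose dtime falls within the
 * last RECENTCY_MIN seconds (plus RECENTCY_DIRTY if its inode table
 * block is still dirty in memory) is treated as recently deleted.
 */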

static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
{
	struct ext4_group_desc	*gdp;
	struct ext4_inode	*raw_inode;
	struct buffer_head	*bh;
	int inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int offset, ret = 0;
	int recentcy = RECENTCY_MIN;
	u32 dtime, now;

	gdp = ext4_get_group_desc(sb, group, NULL);
	if (unlikely(!gdp))
		return 0;

	bh = sb_find_get_block(sb, ext4_inode_table(sb, gdp) +
		       (ino / inodes_per_block));
	if (!bh || !buffer_uptodate(bh))
		/*
		 * If the block is not in the buffer cache, then it
		 * must have been written out.
		 */
		goto out;

	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
	raw_inode = (struct ext4_inode *) (bh->b_data + offset);

	/* i_dtime is only 32 bits on disk, but we only care about relative
	 * times in the range of a few minutes (i.e. long enough to sync a
	 * recently-deleted inode to disk), so using the low 32 bits of the
	 * clock (a 68 year range) is enough, see time_before32() */
	dtime = le32_to_cpu(raw_inode->i_dtime);
	now = ktime_get_real_seconds();
	if (buffer_dirty(bh))
		recentcy += RECENTCY_DIRTY;

	if (dtime && time_before32(dtime, now) &&
	    time_before32(now, dtime + recentcy))
		ret = 1;
out:
	brelse(bh);
	return ret;
}

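/*
 * Scan the inode bitmap for the next usable free inode at or after *ino.
 * Returns 1 and updates *ino when one is found, 0 when the group is
 * exhausted.  In no-journal mode, recently deleted inodes are skipped.
 */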
static int find_inode_bit(struct super_block *sb, ext4_group_t group,
			  struct buffer_head *bitmap, unsigned long *ino)
{
next:
	*ino = ext4_find_next_zero_bit((unsigned long *)
				       bitmap->b_data,
				       EXT4_INODES_PER_GROUP(sb), *ino);
	if (*ino >= EXT4_INODES_PER_GROUP(sb))
		return 0;

	if ((EXT4_SB(sb)->s_journal == NULL) &&
	    recently_deleted(sb, group, *ino)) {
		*ino = *ino + 1;
		if (*ino < EXT4_INODES_PER_GROUP(sb))
			goto next;
		return 0;
	}

	return 1;
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
			       umode_t mode, const struct qstr *qstr,
			       __u32 goal, uid_t *owner, __u32 i_flags,
			       int handle_type, unsigned int line_no,
			       int nblocks)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;
	struct ext4_group_info *grp;
	int encrypt = 0;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return ERR_PTR(-EIO);

	if ((ext4_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) &&
	    !(i_flags & EXT4_EA_INODE_FL)) {
		err = fscrypt_get_encryption_info(dir);
		if (err)
			return ERR_PTR(err);
		if (!fscrypt_has_encryption_key(dir))
			return ERR_PTR(-ENOKEY);
		encrypt = 1;
	}

	if (!handle && sbi->s_journal && !(i_flags & EXT4_EA_INODE_FL)) {
#ifdef CONFIG_EXT4_FS_POSIX_ACL
		struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);

		if (IS_ERR(p))
			return ERR_CAST(p);
		if (p) {
			int acl_size = p->a_count * sizeof(ext4_acl_entry);

			nblocks += (S_ISDIR(mode) ? 2 : 1) *
				__ext4_xattr_set_credits(sb, NULL /* inode */,
					NULL /* block_bh */, acl_size,
					true /* is_create */);
			posix_acl_release(p);
		}
#endif

#ifdef CONFIG_SECURITY
		{
			int num_security_xattrs = 1;

#ifdef CONFIG_INTEGRITY
			num_security_xattrs++;
#endif
			/*
			 * We assume that security xattrs are never
			 * more than 1k.  In practice they are under
			 * 128 bytes.
			 */
			nblocks += num_security_xattrs *
				__ext4_xattr_set_credits(sb, NULL /* inode */,
					NULL /* block_bh */, 1024,
					true /* is_create */);
		}
#endif
		if (encrypt)
			nblocks += __ext4_xattr_set_credits(sb,
					NULL /* inode */, NULL /* block_bh */,
					FSCRYPT_SET_CONTEXT_MAX_SIZE,
					true /* is_create */);
	}

	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	/*
	 * Initialize owners and quota early so that we don't have to account
	 * for quota initialization worst case in standard inode creating
	 * transaction
	 */
	if (owner) {
		inode->i_mode = mode;
		i_uid_write(inode, owner[0]);
		i_gid_write(inode, owner[1]);
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	if (ext4_has_feature_project(sb) &&
	    ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
		ei->i_projid = EXT4_I(dir)->i_projid;
	else
		ei->i_projid = make_kprojid(&init_user_ns, EXT4_DEF_PROJID);

	err = dquot_initialize(inode);
	if (err)
		goto out;

	if (!goal)
		goal = sbi->s_inode_goal;

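	/*
	 * A non-zero goal (passed in by the caller or configured via
	 * sbi->s_inode_goal) selects the starting group and bit directly,
	 * bypassing the group-selection heuristics below.
	 */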
	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto out;

		/*
		 * Check free inodes count before loading bitmap.
		 */
		if (ext4_free_inodes_count(sb, gdp) == 0)
			goto next_group;

		grp = ext4_get_group_info(sb, group);
		/* Skip groups with already-known suspicious inode tables */
		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
			goto next_group;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		/* Skip groups with suspicious inode tables */
		if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) ||
		    IS_ERR(inode_bitmap_bh)) {
			inode_bitmap_bh = NULL;
			goto next_group;
		}

repeat_in_this_group:
		ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
		if (!ret2)
			goto next_group;

		if (group == 0 && (ino + 1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
			goto next_group;
		}

		if (!handle) {
			BUG_ON(nblocks <= 0);
			handle = __ext4_journal_start_sb(dir->i_sb, line_no,
							 handle_type, nblocks,
							 0);
			if (IS_ERR(handle)) {
				err = PTR_ERR(handle);
				ext4_std_error(sb, err);
				goto out;
			}
		}
		BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		if (ret2) {
			/* Someone already took the bit. Repeat the search
			 * with lock held.
			 */
			ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
			if (ret2) {
				ext4_set_bit(ino, inode_bitmap_bh->b_data);
				ret2 = 0;
			} else {
				ret2 = 1; /* we didn't grab the inode */
			}
		}
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */

		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
next_group:
		if (++group == ngroups)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(block_bitmap_bh)) {
			err = PTR_ERR(block_bitmap_bh);
			goto out;
		}
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			ext4_std_error(sb, err);
			goto out;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (ext4_has_group_desc_csum(sb) &&
		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh);
			ext4_group_desc_csum_set(sb, group, gdp);
		}
		ext4_unlock_group(sb, group);
		brelse(block_bitmap_bh);

		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
	}

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		down_read(&grp->alloc_sem); /* protect vs itable lazyinit */
		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group. if it is greater
		 * we need to update the bg_itable_unused count
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		up_read(&grp->alloc_sem);
	} else {
		ext4_lock_group(sb, group);
	}

	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
			ext4_group_t f = ext4_flex_group(sbi, group);

			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
		}
	}
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		ext4_group_desc_csum_set(sb, group, gdp);
	}
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
	}

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
	ei->i_crtime = inode->i_mtime;

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/* Don't inherit extent flag from directory, amongst others. */
	ei->i_flags =
		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
	ei->i_flags |= i_flags;
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;
	ei->i_last_alloc_group = ~0;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		/*
		 * Likely a bitmap corruption causing inode to be allocated
		 * twice.
		 */
		err = -EIO;
		ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
			   inode->i_ino);
		ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		goto out;
	}
	inode->i_generation = prandom_u32();

	/* Precompute checksum seed for inode metadata */
	if (ext4_has_metadata_csum(sb)) {
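		/* seed = crc32c(s_csum_seed, inode number, i_generation) */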
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = cpu_to_le32(inode->i_generation);
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
	ext4_set_inode_state(inode, EXT4_STATE_NEW);

	ei->i_extra_isize = sbi->s_want_extra_isize;
	ei->i_inline_off = 0;
	if (ext4_has_feature_inline_data(sb))
		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
	ret = inode;
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	/*
	 * Since the encryption xattr will always be unique, create it first so
	 * that it's less likely to end up in an external xattr block and
	 * prevent its deduplication.
	 */
	if (encrypt) {
		err = fscrypt_inherit_context(dir, inode, handle, true);
		if (err)
			goto fail_free_drop;
	}

	if (!(ei->i_flags & EXT4_EA_INODE_FL)) {
		err = ext4_init_acl(handle, inode, dir);
		if (err)
			goto fail_free_drop;

		err = ext4_init_security(handle, inode, dir, qstr);
		if (err)
			goto fail_free_drop;
	}

	if (ext4_has_feature_extents(sb)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode);
		}
	}

	if (ext4_handle_valid(handle)) {
		ei->i_sync_tid = handle->h_transaction->t_tid;
		ei->i_datasync_tid = handle->h_transaction->t_tid;
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext4_allocate_inode(inode, dir, mode);
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);
fail_drop:
	clear_nlink(inode);
	unlock_new_inode(inode);
out:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}

/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh = NULL;
	struct inode *inode = NULL;
	int err = -EFSCORRUPTED;

	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
		goto bad_orphan;

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (IS_ERR(bitmap_bh))
		return (struct inode *) bitmap_bh;

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
			   ino, err);
		return inode;
	}

	/*
	 * If the orphan has i_nlink > 0 then it should be able to
	 * be truncated, otherwise it won't be removed from the orphan
	 * list during processing and an infinite loop will result.
	 * Similarly, it must not be a bad inode.
	 */
	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
	    is_bad_inode(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

bad_orphan:
	ext4_error(sb, "bad orphan inode %lu", ino);
	if (bitmap_bh)
		printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
		       bit, (unsigned long long)bitmap_bh->b_blocknr,
		       ext4_test_bit(bit, bitmap_bh->b_data));
	if (inode) {
		printk(KERN_ERR "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_ERR "max_ino=%lu\n", max_ino);
		printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
	return ERR_PTR(err);
}

unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (IS_ERR(bitmap_bh)) {
			bitmap_bh = NULL;
			continue;
		}

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
	       "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}

/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}

/*
 * Zeroes the not-yet-zeroed inode table - just writes zeroes through
 * the whole inode table. Must be called without any spinlock held. The
 * only place it is called from on an active part of the filesystem is
 * the ext4lazyinit thread, so we do not need any special locks; however,
 * we have to prevent inode allocation from the current group, so we take
 * the alloc_sem lock to block ext4_new_inode() until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
				 int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
	handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;

	/* This should not happen, but just to be sure check this */
	if (sb_rdonly(sb)) {
		ret = 1;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp)
		goto out;

	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;

	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	down_write(&grp->alloc_sem);
	/*
	 * If inode bitmap was already initialized there may be some
	 * used inodes so we need to skip blocks with used inodes in
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
			    ext4_itable_unused_count(sb, gdp)),
			    sbi->s_inodes_per_block);

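	/* Sanity-check the on-disk unused-inodes count before trusting it. */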
	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
	    ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
			       ext4_itable_unused_count(sb, gdp)) <
			      EXT4_FIRST_INO(sb)))) {
		ext4_error(sb, "Something is wrong with group %u: "
			   "used itable blocks: %d; "
			   "itable unused count: %u",
			   group, used_blks,
			   ext4_itable_unused_count(sb, gdp));
		ret = 1;
		goto err_out;
	}

	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	ret = ext4_journal_get_write_access(handle,
					    group_desc_bh);
	if (ret)
		goto err_out;

	/*
	 * Skip zeroout if the inode table is full. But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * further zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;

	ext4_debug("going to zero out inode table in group %d\n",
		   group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
		blkdev_issue_flush(sb->s_bdev, GFP_NOFS, NULL);

skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	ext4_group_desc_csum_set(sb, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh,
		     "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL,
					 group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}