balloc.c 19.3 KB
Newer Older
1
/*
2
 *  linux/fs/ext4/balloc.c
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
17
#include <linux/jbd2.h>
18 19
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
20 21
#include "ext4.h"
#include "ext4_jbd2.h"
22
#include "mballoc.h"
23

24 25
#include <trace/events/ext4.h>

26 27 28
static unsigned int num_base_meta_blocks(struct super_block *sb,
					 ext4_group_t block_group);

29 30 31 32
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

33 34 35 36
/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
37
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
38
{
D
Dave Kleikamp 已提交
39
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
40 41
	ext4_grpblk_t offset;

D
Dave Kleikamp 已提交
42
	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
A
Andrew Morton 已提交
43
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
44 45 46
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
D
Dave Kleikamp 已提交
47
		*blockgrpp = blocknr;
48 49 50

}

51 52 53 54
/* Return 1 if @block lives inside @block_group, 0 otherwise. */
static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t grp;

	ext4_get_group_no_and_offset(sb, block, &grp, NULL);
	return (grp == block_group) ? 1 : 0;
}

static int ext4_group_used_meta_blocks(struct super_block *sb,
62 63
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87
{
	ext4_fsblk_t tmp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	/* block bitmap, inode bitmap, and inode table blocks */
	int used_blocks = sbi->s_itb_per_group + 2;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!ext4_block_in_group(sb, tmp, block_group))
				used_blocks -= 1;
		}
	}
	return used_blocks;
}
88

89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104
/* Number of blocks in @block_group; only the last group may be short. */
static unsigned int num_blocks_in_group(struct super_block *sb,
					ext4_group_t block_group)
{
	if (block_group != ext4_get_groups_count(sb) - 1)
		return EXT4_BLOCKS_PER_GROUP(sb);
	/*
	 * Even though mke2fs always initializes the first and last
	 * group, just in case some other tool was used, we need to
	 * make sure we calculate the right free blocks for the
	 * (possibly short) last group.
	 */
	return ext4_blocks_count(EXT4_SB(sb)->s_es) -
		ext4_group_first_block_no(sb, block_group);
}

A
Andreas Dilger 已提交
105 106 107
/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
108
		 ext4_group_t block_group, struct ext4_group_desc *gdp)
A
Andreas Dilger 已提交
109
{
110
	unsigned int bit, bit_max = num_base_meta_blocks(sb, block_group);
111
	ext4_group_t ngroups = ext4_get_groups_count(sb);
112
	unsigned group_blocks = num_blocks_in_group(sb, block_group);
A
Andreas Dilger 已提交
113 114 115 116 117 118 119 120
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (bh) {
		J_ASSERT_BH(bh, buffer_locked(bh));

		/* If checksum is bad mark all blocks used to prevent allocation
		 * essentially implementing a per-group read-only flag. */
		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
121 122
			ext4_error(sb, "Checksum bad for group %u",
					block_group);
123 124 125
			ext4_free_blks_set(sb, gdp, 0);
			ext4_free_inodes_set(sb, gdp, 0);
			ext4_itable_unused_set(sb, gdp, 0);
A
Andreas Dilger 已提交
126 127 128 129 130 131 132
			memset(bh->b_data, 0xff, sb->s_blocksize);
			return 0;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
	}

	if (bh) {
133 134
		ext4_fsblk_t start, tmp;
		int flex_bg = 0;
135

A
Andreas Dilger 已提交
136 137 138
		for (bit = 0; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

139
		start = ext4_group_first_block_no(sb, block_group);
A
Andreas Dilger 已提交
140

141 142 143
		if (EXT4_HAS_INCOMPAT_FEATURE(sb,
					      EXT4_FEATURE_INCOMPAT_FLEX_BG))
			flex_bg = 1;
A
Andreas Dilger 已提交
144

145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160
		/* Set bits for block and inode bitmaps, and inode table */
		tmp = ext4_block_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!flex_bg ||
				ext4_block_in_group(sb, tmp, block_group))
				ext4_set_bit(tmp - start, bh->b_data);
		}
A
Andreas Dilger 已提交
161 162 163 164 165
		/*
		 * Also if the number of blocks within the group is
		 * less than the blocksize * 8 ( which is the size
		 * of bitmap ), set rest of the block bitmap to 1
		 */
166 167
		ext4_mark_bitmap_end(group_blocks, sb->s_blocksize * 8,
				     bh->b_data);
A
Andreas Dilger 已提交
168
	}
169 170
	return group_blocks - bit_max -
		ext4_group_used_meta_blocks(sb, block_group, gdp);
A
Andreas Dilger 已提交
171 172 173
}


174 175 176 177 178 179 180 181
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
182
 * when a file system is mounted (see ext4_fill_super).
183 184 185
 */

/**
186
 * ext4_get_group_desc() -- load group descriptor from disk
187 188 189 190 191
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
192
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
193
					     ext4_group_t block_group,
194
					     struct buffer_head **bh)
195
{
196 197
	unsigned int group_desc;
	unsigned int offset;
198
	ext4_group_t ngroups = ext4_get_groups_count(sb);
199
	struct ext4_group_desc *desc;
200
	struct ext4_sb_info *sbi = EXT4_SB(sb);
201

202
	if (block_group >= ngroups) {
203 204
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);
205 206 207 208

		return NULL;
	}

209 210
	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
211
	if (!sbi->s_group_desc[group_desc]) {
212
		ext4_error(sb, "Group descriptor not loaded - "
213
			   "block_group = %u, group_desc = %u, desc = %u",
214
			   block_group, group_desc, offset);
215 216 217
		return NULL;
	}

218 219 220
	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
221 222
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
223
	return desc;
224 225
}

226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271
/*
 * Sanity-check that the metadata block numbers recorded in the group
 * descriptor are marked in-use in the group's block bitmap.
 * Returns 1 when the bitmap looks valid, 0 otherwise (after logging).
 */
static int ext4_valid_block_bitmap(struct super_block *sb,
					struct ext4_group_desc *desc,
					unsigned int block_group,
					struct buffer_head *bh)
{
	ext4_fsblk_t group_first_block;
	ext4_fsblk_t bitmap_blk;
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all
		 * so the bitmap validation will be skipped for those groups
		 * or it has to also read the block group where the bitmaps
		 * are located to verify they are set.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* is the block bitmap's own block marked in use? */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		goto bad_bitmap;

	/* is the inode bitmap's block marked in use? */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		goto bad_bitmap;

	/* are all of the inode table blocks marked in use? */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

bad_bitmap:
	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
			block_group, bitmap_blk);
	return 0;
}
276
/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the
 * bits for the block/inode bitmaps and inode table are set.
 *
 * The bitmap-uptodate flag is distinct from buffer-uptodate: an
 * uninitialized group's bitmap is constructed in memory, so the buffer
 * contents can be valid while the on-disk block was never read.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			    "block_group = %u, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}

	/* Fast path: bitmap already valid, no locking needed */
	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	/* Re-check under the buffer lock -- someone may have beaten us */
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	/* Group lock serializes against on-demand bitmap initialization */
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		/* Uninitialized group: build the bitmap in memory */
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit if bh is uptodate,
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for read. We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * get set with buffer lock held.
	 */
	trace_ext4_read_block_bitmap_load(sb, block_group);
	set_bitmap_uptodate(bh);
	/* bh_submit_read drops the buffer lock when the I/O completes */
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, "Cannot read block bitmap - "
			    "block_group = %u, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * file system mounted not to panic on error,
	 * continue with corrupt bitmap
	 */
	return bh;
}

355 356 357 358 359 360 361 362
/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 *
 * Check if filesystem has nblocks free & available for allocation.
 * On success return 1, return 0 on failure.
 */
363 364
static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
				s64 nblocks, unsigned int flags)
365
{
366
	s64 free_blocks, dirty_blocks, root_blocks;
367
	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
368
	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;
369

370 371
	free_blocks  = percpu_counter_read_positive(fbc);
	dirty_blocks = percpu_counter_read_positive(dbc);
372
	root_blocks = ext4_r_blocks_count(sbi->s_es);
373

374 375
	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
376 377
		free_blocks  = percpu_counter_sum_positive(fbc);
		dirty_blocks = percpu_counter_sum_positive(dbc);
378 379
	}
	/* Check whether we have space after
380
	 * accounting for current dirty blocks & root reserved blocks.
381
	 */
382 383
	if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
		return 1;
384

385
	/* Hm, nope.  Are (enough) root reserved blocks available? */
386
	if (sbi->s_resuid == current_fsuid() ||
387
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
388 389 390
	    capable(CAP_SYS_RESOURCE) ||
		(flags & EXT4_MB_USE_ROOT_BLOCKS)) {

391 392 393 394 395
		if (free_blocks >= (nblocks + dirty_blocks))
			return 1;
	}

	return 0;
396 397
}

398
int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
399
			   s64 nblocks, unsigned int flags)
400
{
401
	if (ext4_has_free_blocks(sbi, nblocks, flags)) {
402
		percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
403
		return 0;
404 405
	} else
		return -ENOSPC;
406
}
407

408
/**
409
 * ext4_should_retry_alloc()
410 411 412
 * @sb:			super block
 * @retries		number of attemps has been made
 *
413
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
414
 * it is profitable to retry the operation, this function will wait
L
Lucas De Marchi 已提交
415
 * for the current or committing transaction to complete, and then
416 417 418 419
 * return TRUE.
 *
 * if the total number of retries exceed three times, return FALSE.
 */
420
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
421
{
422
	if (!ext4_has_free_blocks(EXT4_SB(sb), 1, 0) ||
423 424
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
425 426 427 428
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

429
	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
430 431
}

A
Aneesh Kumar K.V 已提交
432
/*
433
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
A
Aneesh Kumar K.V 已提交
434 435 436 437
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block(filesystem wide)
T
Theodore Ts'o 已提交
438
 * @count:		pointer to total number of blocks needed
A
Aneesh Kumar K.V 已提交
439 440
 * @errp:               error code
 *
T
Theodore Ts'o 已提交
441
 * Return 1st allocated block number on success, *count stores total account
442
 * error stores in errp pointer
A
Aneesh Kumar K.V 已提交
443
 */
444
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
445 446
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
A
Aneesh Kumar K.V 已提交
447
{
T
Theodore Ts'o 已提交
448
	struct ext4_allocation_request ar;
449
	ext4_fsblk_t ret;
T
Theodore Ts'o 已提交
450 451 452 453 454 455

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
456
	ar.flags = flags;
T
Theodore Ts'o 已提交
457 458 459 460

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
461
	/*
462 463
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metdata, but we do account for it.
464
	 */
465 466
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
467
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
T
Theodore Ts'o 已提交
468
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
469
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
470
		dquot_alloc_block_nofail(inode, ar.len);
471 472
	}
	return ret;
A
Aneesh Kumar K.V 已提交
473 474
}

475
/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 *
 * With EXT4FS_DEBUG defined, also recounts the free blocks from each
 * group's bitmap, logs any mismatch against the descriptor counts, and
 * returns the bitmap-derived total instead.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_blks_count(sb, gdp);
		/* brelse(NULL) is a no-op on the first iteration */
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_blks_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n", ext4_free_blocks_count(es),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	/* Non-debug build: trust the group descriptors' free counts */
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_blks_count(sb, gdp);
	}

	return desc_count;
#endif
}

531
/*
 * Return 1 if @a is an integer power of @b (a == b**k, k >= 1), else 0.
 *
 * Iterate by repeated division instead of multiplying up to @a: the old
 * "num *= b" loop could overflow the signed int accumulator for large
 * group numbers, which is undefined behavior and could spin forever.
 * Dividing down cannot overflow.  (The only input whose result differs,
 * a == 1, is unreachable: ext4_group_sparse() handles group <= 1 before
 * calling here.)
 */
static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return a == 1;
		if (a % b)
			return 0;
		a = a / b;
	}
}

540
/*
 * With sparse_super, only groups 0, 1 and powers of 3, 5 and 7 carry
 * superblock/GDT backups.  Return 1 for such a group, 0 otherwise.
 */
static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if ((group & 1) == 0)
		return 0;	/* even groups > 0 never hold backups */
	if (test_root(group, 3))
		return 1;
	if (test_root(group, 5))
		return 1;
	return test_root(group, 7);
}

/**
551
 *	ext4_bg_has_super - number of blocks used by the superblock in group
552 553 554 555 556 557
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
558
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
559
{
560 561 562
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
563 564 565 566
		return 0;
	return 1;
}

567 568
/*
 * Number of GDT blocks in @group under META_BG layout: only the first,
 * second and last group of each metagroup hold a copy (0 or 1 blocks).
 */
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	return (group == first || group == first + 1 || group == last) ? 1 : 0;
}

579 580
/*
 * Number of GDT blocks in @group under the classic (non-META_BG)
 * layout: zero unless the group carries a superblock backup.
 */
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	return EXT4_SB(sb)->s_gdb_count;
}

/**
592
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
593 594 595 596 597 598 599
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
600
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
601 602
{
	unsigned long first_meta_bg =
603 604
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
605

606
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG) ||
607
			metagroup < first_meta_bg)
608
		return ext4_bg_num_gdb_nometa(sb, group);
609

610
	return ext4_bg_num_gdb_meta(sb,group);
611 612

}
613

614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638
/*
 * This function returns the number of file system metadata blocks at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned int num_base_meta_blocks(struct super_block *sb,
					 ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	/* Superblock backup (if any) present in this group */
	int num = ext4_bg_has_super(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) &&
	    block_group >= le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			   sbi->s_desc_per_block) {
		/* META_BG block groups: GDT copy regardless of superblock */
		num += ext4_bg_num_gdb(sb, block_group);
	} else if (num) {
		/* Classic layout: GDT + reserved GDT follow the superblock */
		num += ext4_bg_num_gdb(sb, block_group);
		num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
	}
	return num;
}
639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686
/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_group_t block_group = ei->i_block_group;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;

	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/* Delayed allocation spreads blocks itself -- skip the colour */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	/* Scatter start points per-pid within the group (or its tail) */
	colour = (current->pid % 16) *
		((bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) ?
		 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16) :
		 ((last_block - bg_start) / 16));
	return bg_start + colour;
}