/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number
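 *
 * In effect:
 *	group  = (blocknr - s_first_data_block) / EXT4_BLOCKS_PER_GROUP(sb)
 *	offset = (blocknr - s_first_data_block) % EXT4_BLOCKS_PER_GROUP(sb)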
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;

}

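/*
 * Return 1 if @block lies within @block_group, 0 otherwise.
 */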
static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;
	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}

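/*
 * Count the blocks in @block_group that are consumed by group metadata
 * (the block bitmap, the inode bitmap and the inode table).  With FLEX_BG
 * those blocks may live in another group, in which case they are not
 * charged to this group.
 */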
static int ext4_group_used_meta_blocks(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	ext4_fsblk_t tmp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	/* block bitmap, inode bitmap, and inode table blocks */
	int used_blocks = sbi->s_itb_per_group + 2;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!ext4_block_in_group(sb, tmp, block_group))
				used_blocks -= 1;
		}
	}
	return used_blocks;
}

/* Initializes an uninitialized block bitmap, if one is given, and returns
 * the number of free blocks in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
		 ext4_group_t block_group, struct ext4_group_desc *gdp)
{
	int bit, bit_max;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	unsigned free_blocks, group_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (bh) {
		J_ASSERT_BH(bh, buffer_locked(bh));

		/* If the checksum is bad, mark all blocks used to prevent allocation,
		 * essentially implementing a per-group read-only flag. */
		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
			ext4_error(sb, "Checksum bad for group %u",
					block_group);
			ext4_free_blks_set(sb, gdp, 0);
			ext4_free_inodes_set(sb, gdp, 0);
			ext4_itable_unused_set(sb, gdp, 0);
			memset(bh->b_data, 0xff, sb->s_blocksize);
			return 0;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
	}

	/* Check for superblock and gdt backups in this group */
	bit_max = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (bit_max) {
			bit_max += ext4_bg_num_gdb(sb, block_group);
			bit_max +=
				le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		bit_max += ext4_bg_num_gdb(sb, block_group);
	}

	if (block_group == ngroups - 1) {
		/*
		 * Even though mke2fs always initializes the first and last
		 * group, some other tool may have enabled
		 * EXT4_BG_BLOCK_UNINIT, so we need to make sure we calculate
		 * the right number of free blocks.
		 */
		group_blocks = ext4_blocks_count(sbi->s_es) -
			ext4_group_first_block_no(sb, ngroups - 1);
	} else {
		group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
	}

	free_blocks = group_blocks - bit_max;

	if (bh) {
		ext4_fsblk_t start, tmp;
		int flex_bg = 0;

		for (bit = 0; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		start = ext4_group_first_block_no(sb, block_group);

		if (EXT4_HAS_INCOMPAT_FEATURE(sb,
					      EXT4_FEATURE_INCOMPAT_FLEX_BG))
			flex_bg = 1;

		/* Set bits for block and inode bitmaps, and inode table */
		tmp = ext4_block_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!flex_bg ||
				ext4_block_in_group(sb, tmp, block_group))
				ext4_set_bit(tmp - start, bh->b_data);
		}
		/*
		 * Also, if the number of blocks within the group is less
		 * than blocksize * 8 (the size of the bitmap in bits), set
		 * the rest of the block bitmap to 1.
		 */
		ext4_mark_bitmap_end(group_blocks, sb->s_blocksize * 8,
				     bh->b_data);
	}
	return free_blocks - ext4_group_used_meta_blocks(sb, block_group, gdp);
}


/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

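/*
 * Sanity-check a freshly read block bitmap: the bits covering the group's
 * own block bitmap, inode bitmap and inode table must all be set.
 * Returns 1 if the bitmap looks valid, 0 otherwise.
 */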
static int ext4_valid_block_bitmap(struct super_block *sb,
					struct ext4_group_desc *desc,
					unsigned int block_group,
					struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t bitmap_blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* With FLEX_BG, the inode/block bitmaps and inode table
		 * blocks may not be in this group at all, so bitmap
		 * validation is skipped for such groups; verifying them
		 * would require also reading the block group where those
		 * blocks actually reside.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode table block number is set */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
			block_group, bitmap_blk);
	return 0;
}
/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the
 * bits for the block/inode bitmaps and inode table are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			    "block_group = %u, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninitialized and bh is uptodate,
		 * the bitmap is uptodate as well.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for read. We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * gets set with the buffer lock held.
	 */
	trace_ext4_read_block_bitmap_load(sb, block_group);
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, "Cannot read block bitmap - "
			    "block_group = %u, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * The file system was mounted so as not to panic on error;
	 * continue with the corrupt bitmap.
	 */
	return bh;
}

/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 *
 * Check if filesystem has nblocks free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
{
	s64 free_blocks, dirty_blocks, root_blocks;
	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;

	free_blocks  = percpu_counter_read_positive(fbc);
	dirty_blocks = percpu_counter_read_positive(dbc);
	root_blocks = ext4_r_blocks_count(sbi->s_es);

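	/*
	 * The percpu counter reads are only approximate; once we get close
	 * to the watermark, fall back to the exact (but more expensive) sums.
	 */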
	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
		free_blocks  = percpu_counter_sum_positive(fbc);
		dirty_blocks = percpu_counter_sum_positive(dbc);
	}
	/* Check whether we have space after
	 * accounting for current dirty blocks & root reserved blocks.
	 */
	if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
		return 1;

	/* Hm, nope.  Are (enough) root reserved blocks available? */
	if (sbi->s_resuid == current_fsuid() ||
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE)) {
		if (free_blocks >= (nblocks + dirty_blocks))
			return 1;
	}

	return 0;
}

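/*
 * Reserve @nblocks by adding them to the dirty-blocks counter if the
 * filesystem has enough free space; returns 0 on success, -ENOSPC otherwise.
 */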
int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
						s64 nblocks)
{
	if (ext4_has_free_blocks(sbi, nblocks)) {
		percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb), 1) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block (filesystem wide)
 * @count:		pointer to total number of blocks needed
 * @errp:               error code
 *
 * Returns the first allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
		ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode, ar.len);
	}
	return ret;
}

/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_blks_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_blks_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n", ext4_free_blocks_count(es),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_blks_count(sb, gdp);
	}

	return desc_count;
#endif
}

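/* Return 1 if @a is a power of @b (b, b^2, b^3, ...). */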
static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

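/*
 * With the sparse_super feature, only groups 0, 1 and powers of 3, 5 and 7
 * carry superblock/group-descriptor backups.
 */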
static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

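/*
 * With META_BG, the descriptor block for a metagroup lives in the first
 * group of the metagroup, with backups in the second and last groups.
 */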
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

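/*
 * Number of group descriptor blocks stored in @group under the traditional
 * (non-META_BG) layout: groups with a superblock backup also carry a copy
 * of the group descriptor table.
 */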
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);

}