/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
17
#include <linux/jbd2.h>
18 19
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
20 21
#include "ext4.h"
#include "ext4_jbd2.h"
22
#include "mballoc.h"
23

24 25
#include <trace/events/ext4.h>

26 27 28 29
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

30 31 32 33
/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
34
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
35
{
D
Dave Kleikamp 已提交
36
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
37 38
	ext4_grpblk_t offset;

D
Dave Kleikamp 已提交
39
	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
A
Andrew Morton 已提交
40
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
41 42 43
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
D
Dave Kleikamp 已提交
44
		*blockgrpp = blocknr;
45 46 47

}

48 49 50 51
static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;
A
Aneesh Kumar K.V 已提交
52
	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
53 54 55 56 57
	if (actual_group == block_group)
		return 1;
	return 0;
}

58 59 60 61 62 63
/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
				    ext4_group_t block_group,
				    struct ext4_group_desc *gdp)
64
{
65 66 67 68
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
69 70
	struct ext4_sb_info *sbi = EXT4_SB(sb);

71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122
	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi, (start -
					       ext4_block_bitmap(sb, gdp)));
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 start - ext4_inode_bitmap(sb, gdp));
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, start - itbl_blk + i);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
123 124
		}
	}
125 126 127 128 129 130 131

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
132
}
133

134 135
static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
136
{
137 138
	unsigned int blocks;

139 140 141 142 143 144 145
	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
146
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
147 148
			ext4_group_first_block_no(sb, block_group);
	} else
149 150
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
151 152
}

153 154 155 156
/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
			    ext4_group_t block_group,
			    struct ext4_group_desc *gdp)
A
Andreas Dilger 已提交
157
{
158
	unsigned int bit, bit_max;
A
Andreas Dilger 已提交
159
	struct ext4_sb_info *sbi = EXT4_SB(sb);
160 161 162 163 164 165 166 167 168 169 170 171 172 173
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_blks_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return;
A
Andreas Dilger 已提交
174
	}
175
	memset(bh->b_data, 0, sb->s_blocksize);
A
Andreas Dilger 已提交
176

177
	bit_max = ext4_num_base_meta_clusters(sb, block_group);
178 179
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);
180

181
	start = ext4_group_first_block_no(sb, block_group);
A
Andreas Dilger 已提交
182

183 184
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;
A
Andreas Dilger 已提交
185

186 187 188
	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
189
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
A
Andreas Dilger 已提交
190

191 192
	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
193
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
194

195 196 197
	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
198
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
199
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
A
Andreas Dilger 已提交
200
	}
201

202 203 204 205 206
	/*
	 * Also if the number of blocks within the group is less than
	 * the blocksize * 8 ( which is the size of bitmap ), set rest
	 * of the block bitmap to 1
	 */
207
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
208
			     sb->s_blocksize * 8, bh->b_data);
A
Andreas Dilger 已提交
209 210
}

211 212 213 214 215 216 217
/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_blocks_after_init(struct super_block *sb,
				     ext4_group_t block_group,
				     struct ext4_group_desc *gdp)
{
218 219
	return num_clusters_in_group(sb, block_group) - 
		ext4_num_overhead_clusters(sb, block_group, gdp);
220
}
A
Andreas Dilger 已提交
221

222 223 224 225 226 227 228 229
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
230
 * when a file system is mounted (see ext4_fill_super).
231 232 233
 */

/**
234
 * ext4_get_group_desc() -- load group descriptor from disk
235 236 237 238 239
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
240
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
241
					     ext4_group_t block_group,
242
					     struct buffer_head **bh)
243
{
244 245
	unsigned int group_desc;
	unsigned int offset;
246
	ext4_group_t ngroups = ext4_get_groups_count(sb);
247
	struct ext4_group_desc *desc;
248
	struct ext4_sb_info *sbi = EXT4_SB(sb);
249

250
	if (block_group >= ngroups) {
251 252
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);
253 254 255 256

		return NULL;
	}

257 258
	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
259
	if (!sbi->s_group_desc[group_desc]) {
260
		ext4_error(sb, "Group descriptor not loaded - "
261
			   "block_group = %u, group_desc = %u, desc = %u",
262
			   block_group, group_desc, offset);
263 264 265
		return NULL;
	}

266 267 268
	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
269 270
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
271
	return desc;
272 273
}

274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319
static int ext4_valid_block_bitmap(struct super_block *sb,
					struct ext4_group_desc *desc,
					unsigned int block_group,
					struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t bitmap_blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all
		 * so the bitmap validation will be skipped for those groups
		 * or it has to also read the block group where the bitmaps
		 * are located to verify they are set.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode table block number is set */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
320
	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
321 322 323
			block_group, bitmap_blk);
	return 0;
}
324
/**
325
 * ext4_read_block_bitmap()
326 327 328
 * @sb:			super block
 * @block_group:	given block group
 *
329 330
 * Read the bitmap for a given block_group,and validate the
 * bits for block/inode/inode tables are set in the bitmaps
331 332 333
 *
 * Return buffer_head on success or NULL in case of failure.
 */
A
Andreas Dilger 已提交
334
struct buffer_head *
335
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
336
{
337 338
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
339
	ext4_fsblk_t bitmap_blk;
340

A
Andreas Dilger 已提交
341
	desc = ext4_get_group_desc(sb, block_group, NULL);
342
	if (!desc)
343 344
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
345 346
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
347
		ext4_error(sb, "Cannot read block bitmap - "
348
			    "block_group = %u, block_bitmap = %llu",
349
			    block_group, bitmap_blk);
350 351
		return NULL;
	}
352 353

	if (bitmap_uptodate(bh))
354 355
		return bh;

356
	lock_buffer(bh);
357 358 359 360
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
361
	ext4_lock_group(sb, block_group);
A
Andreas Dilger 已提交
362
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
363
		ext4_init_block_bitmap(sb, bh, block_group, desc);
364
		set_bitmap_uptodate(bh);
365
		set_buffer_uptodate(bh);
366
		ext4_unlock_group(sb, block_group);
A
Aneesh Kumar K.V 已提交
367
		unlock_buffer(bh);
368
		return bh;
A
Andreas Dilger 已提交
369
	}
370
	ext4_unlock_group(sb, block_group);
371 372 373 374 375 376 377 378 379 380 381 382 383 384 385
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit if bh is uptodate,
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for read. We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * get set with buffer lock held.
	 */
386
	trace_ext4_read_block_bitmap_load(sb, block_group);
387
	set_bitmap_uptodate(bh);
388 389
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
390
		ext4_error(sb, "Cannot read block bitmap - "
391
			    "block_group = %u, block_bitmap = %llu",
392
			    block_group, bitmap_blk);
393 394
		return NULL;
	}
395 396 397 398 399
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * file system mounted not to panic on error,
	 * continue with corrupt bitmap
	 */
400 401 402
	return bh;
}

403 404 405 406 407 408 409 410
/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 *
 * Check if filesystem has nblocks free & available for allocation.
 * On success return 1, return 0 on failure.
 */
411 412
static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
				s64 nblocks, unsigned int flags)
413
{
414
	s64 free_blocks, dirty_blocks, root_blocks;
415
	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
416
	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;
417

418 419
	free_blocks  = percpu_counter_read_positive(fbc);
	dirty_blocks = percpu_counter_read_positive(dbc);
420
	root_blocks = ext4_r_blocks_count(sbi->s_es);
421

422 423
	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
424 425
		free_blocks  = percpu_counter_sum_positive(fbc);
		dirty_blocks = percpu_counter_sum_positive(dbc);
426 427
	}
	/* Check whether we have space after
428
	 * accounting for current dirty blocks & root reserved blocks.
429
	 */
430 431
	if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
		return 1;
432

433
	/* Hm, nope.  Are (enough) root reserved blocks available? */
434
	if (sbi->s_resuid == current_fsuid() ||
435
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
436 437 438
	    capable(CAP_SYS_RESOURCE) ||
		(flags & EXT4_MB_USE_ROOT_BLOCKS)) {

439 440 441 442 443
		if (free_blocks >= (nblocks + dirty_blocks))
			return 1;
	}

	return 0;
444 445
}

446
int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
447
			   s64 nblocks, unsigned int flags)
448
{
449
	if (ext4_has_free_blocks(sbi, nblocks, flags)) {
450
		percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
451
		return 0;
452 453
	} else
		return -ENOSPC;
454
}
455

456
/**
457
 * ext4_should_retry_alloc()
458 459 460
 * @sb:			super block
 * @retries		number of attemps has been made
 *
461
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
462
 * it is profitable to retry the operation, this function will wait
L
Lucas De Marchi 已提交
463
 * for the current or committing transaction to complete, and then
464 465 466 467
 * return TRUE.
 *
 * if the total number of retries exceed three times, return FALSE.
 */
468
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
469
{
470
	if (!ext4_has_free_blocks(EXT4_SB(sb), 1, 0) ||
471 472
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
473 474 475 476
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

477
	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
478 479
}

A
Aneesh Kumar K.V 已提交
480
/*
481
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
A
Aneesh Kumar K.V 已提交
482 483 484 485
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block(filesystem wide)
T
Theodore Ts'o 已提交
486
 * @count:		pointer to total number of blocks needed
A
Aneesh Kumar K.V 已提交
487 488
 * @errp:               error code
 *
T
Theodore Ts'o 已提交
489
 * Return 1st allocated block number on success, *count stores total account
490
 * error stores in errp pointer
A
Aneesh Kumar K.V 已提交
491
 */
492
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
493 494
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
A
Aneesh Kumar K.V 已提交
495
{
T
Theodore Ts'o 已提交
496
	struct ext4_allocation_request ar;
497
	ext4_fsblk_t ret;
T
Theodore Ts'o 已提交
498 499 500 501 502 503

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
504
	ar.flags = flags;
T
Theodore Ts'o 已提交
505 506 507 508

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
509
	/*
510 511
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metdata, but we do account for it.
512
	 */
513 514
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
515
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
T
Theodore Ts'o 已提交
516
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
517
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
518
		dquot_alloc_block_nofail(inode, ar.len);
519 520
	}
	return ret;
A
Aneesh Kumar K.V 已提交
521 522
}

523
/**
524
 * ext4_count_free_blocks() -- count filesystem free blocks
525 526 527 528
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
529
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
530
{
531 532
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
533
	ext4_group_t i;
534
	ext4_group_t ngroups = ext4_get_groups_count(sb);
535 536 537
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
538
	unsigned int x;
539 540
	struct buffer_head *bitmap_bh = NULL;

541
	es = EXT4_SB(sb)->s_es;
542 543 544 545 546
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
547
		gdp = ext4_get_group_desc(sb, i, NULL);
548 549
		if (!gdp)
			continue;
550
		desc_count += ext4_free_blks_count(sb, gdp);
551
		brelse(bitmap_bh);
552
		bitmap_bh = ext4_read_block_bitmap(sb, i);
553 554 555
		if (bitmap_bh == NULL)
			continue;

556
		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
557 558
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_blks_count(sb, gdp), x);
559 560 561
		bitmap_count += x;
	}
	brelse(bitmap_bh);
562 563 564
	printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n", ext4_free_blocks_count(es),
	       desc_count, bitmap_count);
565 566 567 568
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
569
		gdp = ext4_get_group_desc(sb, i, NULL);
570 571
		if (!gdp)
			continue;
572
		desc_count += ext4_free_blks_count(sb, gdp);
573 574 575 576 577 578
	}

	return desc_count;
#endif
}

579
static inline int test_root(ext4_group_t a, int b)
580 581 582 583 584 585 586 587
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

588
static int ext4_group_sparse(ext4_group_t group)
589 590 591 592 593 594 595 596 597 598
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

/**
599
 *	ext4_bg_has_super - number of blocks used by the superblock in group
600 601 602 603 604 605
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
606
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
607
{
608 609 610
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
611 612 613 614
		return 0;
	return 1;
}

615 616
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
617
{
618
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
619 620
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;
621 622 623 624 625 626

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

627 628
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
629
{
630 631 632 633 634 635 636
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
637 638 639
}

/**
640
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
641 642 643 644 645 646 647
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
648
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
649 650
{
	unsigned long first_meta_bg =
651 652
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
653

654
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG) ||
655
			metagroup < first_meta_bg)
656
		return ext4_bg_num_gdb_nometa(sb, group);
657

658
	return ext4_bg_num_gdb_meta(sb,group);
659 660

}
661

662
/*
663
 * This function returns the number of file system metadata clusters at
664 665
 * the beginning of a block group, including the reserved gdt blocks.
 */
666 667
unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
668 669
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
670
	unsigned num;
671 672 673 674 675 676 677 678 679 680 681 682 683 684

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
685
	return EXT4_NUM_B2C(sbi, num);
686
}
687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734
/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}