/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}
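
/*
 * Worked example (a sketch, assuming the common mke2fs layout: 4KiB
 * blocks, 32768 blocks per group, s_first_data_block == 0 and a
 * cluster size equal to the block size, so s_cluster_bits == 0):
 * for blocknr 100000, do_div() leaves blocknr = 100000 / 32768 = 3
 * (the group number) and returns 100000 % 32768 = 1696 (the offset
 * into that group's bitmap).
 */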

static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;
	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
				    ext4_group_t block_group,
				    struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi, (ext4_block_bitmap(sb, gdp) -
					       start));
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}
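
/*
 * Worked example (a sketch, assuming no flex_bg and no bigalloc, with
 * ext4_num_base_meta_clusters() == 3): when the block bitmap, inode
 * bitmap and inode table directly follow the base metadata, each
 * in-group metadata block lands exactly on the next cluster, so
 * num_clusters just keeps incrementing and the function returns
 * 3 + 1 + 1 + sbi->s_itb_per_group with no out-of-line clusters to
 * track.
 */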

static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
			    ext4_group_t block_group,
			    struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If the checksum is bad, mark all blocks used to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}
	/*
	 * Also, if the number of clusters within the group is less
	 * than blocksize * 8 (the size of the bitmap in bits), set
	 * the rest of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
}
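
/*
 * Example result (a sketch, assuming 4KiB blocks, no flex_bg, two
 * base metadata clusters and s_itb_per_group == 512): bits 0-1
 * (superblock backup + descriptors), the block and inode bitmap bits
 * and the 512 inode table bits come out set, and
 * ext4_mark_bitmap_end() then sets every bit from
 * num_clusters_in_group() up to blocksize * 8 so the nonexistent
 * tail of the group can never be allocated.
 */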

/* Return the number of free clusters in a block group.  This is used
 * when the block bitmap is uninitialized, so we can't just count the
 * bits in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}
/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */
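
/*
 * For example, the start of a group in a freshly made 4KiB-block
 * filesystem (a sketch; the exact counts depend on mke2fs options)
 * looks like:
 *
 *   [ super | group descriptors | reserved gdt | block bitmap |
 *     inode bitmap | inode table | data blocks ... ]
 */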

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}
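
/*
 * Worked example (a sketch, assuming 4KiB blocks and 32-byte
 * descriptors, i.e. EXT4_DESC_PER_BLOCK(sb) == 128): block_group 200
 * is found in descriptor block 200 >> 7 = 1, at index
 * 200 & 127 = 72 within that block's array of descriptors.
 */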

static int ext4_valid_block_bitmap(struct super_block *sb,
					struct ext4_group_desc *desc,
					unsigned int block_group,
					struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t bitmap_blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all, so bitmap
		 * validation is skipped for those groups; verifying
		 * them would require also reading the block groups
		 * where the bitmaps actually live.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode table block number is set */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
			block_group, bitmap_blk);
	return 0;
}
/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode table blocks are set in the bitmap
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			    "block_group = %u, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
354 355

	if (bitmap_uptodate(bh))
356 357
		return bh;

358
	lock_buffer(bh);
359 360 361 362
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit, an uptodate bh means the
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * submit the buffer_head for read. We can
	 * safely mark the bitmap as uptodate now.
	 * We do it here so the bitmap uptodate bit
	 * gets set with the buffer lock held.
	 */
	trace_ext4_read_block_bitmap_load(sb, block_group);
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, "Cannot read block bitmap - "
			    "block_group = %u, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * The file system was mounted not to panic on errors;
	 * continue with the corrupt bitmap.
	 */
	return bh;
}
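
/*
 * Typical usage (a sketch):
 *
 *	struct buffer_head *bh = ext4_read_block_bitmap(sb, group);
 *	if (!bh)
 *		return -EIO;
 *	... inspect or modify bh->b_data under ext4_lock_group() ...
 *	brelse(bh);
 */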

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, root_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es));

	if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= ((root_clusters + nclusters) + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (sbi->s_resuid == current_fsuid() ||
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
		(flags & EXT4_MB_USE_ROOT_BLOCKS)) {


		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}
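
/*
 * Worked example (a sketch): with 1000 free clusters, 100 dirty
 * clusters, 50 root-reserved clusters and nclusters == 8, an
 * ordinary task needs free >= 50 + 8 + 100 = 158, so the request is
 * granted; a CAP_SYS_RESOURCE task only needs free >= 8 + 100 = 108.
 */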

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
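
/*
 * Typical caller pattern (a sketch; compare the write paths that
 * retry allocation on ENOSPC):
 *
 *	int retries = 0;
 * retry:
 *	handle = ext4_journal_start(inode, needed_blocks);
 *	...
 *	if (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
 *		goto retry;
 */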

/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block(filesystem wide)
 * @flags:              allocation flags, passed through to ext4_mb_new_blocks()
 * @count:              pointer to total number of clusters needed
 * @errp:               error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and the error, if any, is stored in
 * *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}
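
/*
 * Example call (a sketch): allocate a single metadata block near
 * @goal, with no special allocation flags:
 *
 *	unsigned long count = 1;
 *	int err;
 *	ext4_fsblk_t blk;
 *
 *	blk = ext4_new_meta_blocks(handle, inode, goal, 0, &count, &err);
 *	if (!blk)
 *		return err;
 */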

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
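
/*
 * With sparse_super, backups therefore live only in group 0, group 1
 * and groups that are powers of 3, 5 or 7:
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, 243, 343, ...
 */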

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}
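
/*
 * Worked example (a sketch, assuming no meta_bg, a filesystem small
 * enough to need a single descriptor block, and mke2fs having set
 * aside 256 reserved gdt blocks): a group holding a superblock backup
 * has num = 1 + 1 + 256 = 258 blocks of base metadata; with bigalloc
 * and 16 blocks per cluster, EXT4_NUM_B2C() rounds that up to 17
 * clusters.
 */
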
/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
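
/*
 * Worked example (a sketch, assuming 32768 blocks per group and a
 * goal group that is not the last one): a task with pid 4242 gets
 * colour = (4242 % 16) * (32768 / 16) = 2 * 2048 = 4096, so
 * concurrent allocators are spread across the group instead of
 * contending for its first free blocks.
 */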