/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
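	/*
	 * do_div() leaves the quotient (the group number) in blocknr and
	 * returns the remainder (the offset within that group).
	 */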
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;

}

static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;
	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}

static int ext4_group_used_meta_blocks(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	ext4_fsblk_t tmp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	/* block bitmap, inode bitmap, and inode table blocks */
	int used_blocks = sbi->s_itb_per_group + 2;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
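		/*
		 * With FLEX_BG this group's bitmaps and inode table may be
		 * placed in a different group, so only count the metadata
		 * blocks that actually reside in this group.
		 */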
		if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!ext4_block_in_group(sb, tmp, block_group))
				used_blocks -= 1;
		}
	}
	return used_blocks;
}

/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
		 ext4_group_t block_group, struct ext4_group_desc *gdp)
{
	int bit, bit_max;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	unsigned free_blocks, group_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (bh) {
		J_ASSERT_BH(bh, buffer_locked(bh));

		/* If checksum is bad mark all blocks used to prevent allocation
		 * essentially implementing a per-group read-only flag. */
		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
			ext4_error(sb, "Checksum bad for group %u",
					block_group);
			ext4_free_blks_set(sb, gdp, 0);
			ext4_free_inodes_set(sb, gdp, 0);
			ext4_itable_unused_set(sb, gdp, 0);
			memset(bh->b_data, 0xff, sb->s_blocksize);
			return 0;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
	}

	/* Check for superblock and gdt backups in this group */
	bit_max = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (bit_max) {
			bit_max += ext4_bg_num_gdb(sb, block_group);
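			/* blocks reserved for online resize of the GDT */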
			bit_max +=
				le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
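		/*
		 * With META_BG the descriptors for this group live inside
		 * its meta block group, and no reserved GDT blocks are
		 * counted here.
		 */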
		bit_max += ext4_bg_num_gdb(sb, block_group);
	}

	if (block_group == ngroups - 1) {
		/*
		 * Even though mke2fs always initializes the first and last
		 * group, if some other tool enabled EXT4_BG_BLOCK_UNINIT we
		 * need to make sure we calculate the right free block count.
		 */
		group_blocks = ext4_blocks_count(sbi->s_es) -
			ext4_group_first_block_no(sb, ngroups - 1);
	} else {
		group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
	}

	free_blocks = group_blocks - bit_max;

	if (bh) {
		ext4_fsblk_t start, tmp;
		int flex_bg = 0;

		for (bit = 0; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		start = ext4_group_first_block_no(sb, block_group);

		if (EXT4_HAS_INCOMPAT_FEATURE(sb,
					      EXT4_FEATURE_INCOMPAT_FLEX_BG))
			flex_bg = 1;

		/* Set bits for block and inode bitmaps, and inode table */
		tmp = ext4_block_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!flex_bg ||
				ext4_block_in_group(sb, tmp, block_group))
				ext4_set_bit(tmp - start, bh->b_data);
		}
		/*
		 * Also, if the number of blocks within the group is less
		 * than the blocksize * 8 (which is the size of the bitmap),
		 * set the rest of the block bitmap to 1.
		 */
		ext4_mark_bitmap_end(group_blocks, sb->s_blocksize * 8,
				     bh->b_data);
	}
	return free_blocks - ext4_group_used_meta_blocks(sb, block_group, gdp);
}


/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */
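/*
 * Illustrative layout (classic non-flex_bg case, 4KB blocks, i.e. 32768
 * blocks per group): a group that carries a superblock backup looks like
 *   [superblock] [group descriptors] [reserved GDT blocks]
 *   [block bitmap] [inode bitmap] [inode table] [data blocks]
 * while groups without a backup start directly with their bitmaps.
 */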

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

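	/* which descriptor block the group falls in, and its index there */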
	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

static int ext4_valid_block_bitmap(struct super_block *sb,
					struct ext4_group_desc *desc,
					unsigned int block_group,
					struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t bitmap_blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/*
		 * With FLEX_BG, the inode/block bitmaps and inode table
		 * blocks may not be in this group at all, so bitmap
		 * validation is skipped for such groups; otherwise we would
		 * also have to read the group where the bitmaps live to
		 * verify that their bits are set.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode table block number is set */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
	ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
			block_group, bitmap_blk);
	return 0;
}
/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode table blocks are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			    "block_group = %u, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		return bh;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
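		/*
		 * The on-disk bitmap of an uninitialized group is not valid;
		 * construct it in memory instead of reading it.
		 */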
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate, the
		 * bitmap is uptodate as well.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	/*
	 * Submit the buffer_head for read.  We can safely mark the
	 * bitmap as uptodate now.  We do it here so the bitmap uptodate
	 * bit gets set with the buffer lock held.
	 */
	set_bitmap_uptodate(bh);
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, "Cannot read block bitmap - "
			    "block_group = %u, block_bitmap = %llu",
			    block_group, bitmap_blk);
		return NULL;
	}
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * If the file system is mounted so as not to panic on errors,
	 * continue with the corrupt bitmap.
	 */
	return bh;
}

/**
 * ext4_add_groupblocks() -- Add given blocks to an existing group
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physical block to add to the block group
 * @count:			number of blocks to add
 *
 * This marks the blocks as free in the bitmap and asks mballoc to reload
 * the buddy cache afterwards by setting the group's
 * EXT4_GROUP_INFO_NEED_INIT_BIT flag.
 */
void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	ext4_grpblk_t bit;
	unsigned int i;
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err = 0, ret, blk_free_count;
	ext4_grpblk_t blocks_freed;
	struct ext4_group_info *grp;

	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);

	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	grp = ext4_get_group_info(sb, block_group);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		goto error_return;
	}
	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

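	/*
	 * Refuse to add blocks that overlap this group's own metadata
	 * (block/inode bitmaps or the inode table).
	 */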
	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group)) {
		ext4_error(sb, "Adding blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);
		goto error_return;
	}

	/*
	 * We are about to add blocks to the bitmap,
	 * so we need undo access.
	 */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;
	/*
	 * make sure we don't allow a parallel init on other groups in the
	 * same buddy cache
	 */
	down_write(&grp->alloc_sem);
	for (i = 0, blocks_freed = 0; i < count; i++) {
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
						bit + i, bitmap_bh->b_data)) {
			ext4_error(sb, "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			blocks_freed++;
		}
	}
	ext4_lock_group(sb, block_group);
	blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
	ext4_free_blks_set(sb, desc, blk_free_count);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	ext4_unlock_group(sb, block_group);
	percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		atomic_add(blocks_freed,
			   &sbi->s_flex_groups[flex_group].free_blocks);
	}
	/*
	 * request to reload the buddy with the
	 * new bitmap information
	 */
	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
	grp->bb_free += blocks_freed;
	up_write(&grp->alloc_sem);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
	if (!err)
		err = ret;

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}

/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 *
 * Check if filesystem has nblocks free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
{
	s64 free_blocks, dirty_blocks, root_blocks;
	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;

	free_blocks  = percpu_counter_read_positive(fbc);
	dirty_blocks = percpu_counter_read_positive(dbc);
	root_blocks = ext4_r_blocks_count(sbi->s_es);

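	/*
	 * The reads above are only approximate (per-CPU deltas may be
	 * unaccounted); near the limit, fall back to the exact sums.
	 */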
	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
		free_blocks  = percpu_counter_sum_positive(fbc);
		dirty_blocks = percpu_counter_sum_positive(dbc);
		if (dirty_blocks < 0) {
			printk(KERN_CRIT "Dirty block accounting "
					"went wrong %lld\n",
					(long long)dirty_blocks);
		}
	}
	/* Check whether we have space after
	 * accounting for current dirty blocks & root reserved blocks.
	 */
	if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
		return 1;

	/* Hm, nope.  Are (enough) root reserved blocks available? */
	if (sbi->s_resuid == current_fsuid() ||
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE)) {
		if (free_blocks >= (nblocks + dirty_blocks))
			return 1;
	}

	return 0;
}

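/*
 * Claim nblocks for an upcoming allocation: if enough blocks are free,
 * account them as dirty (reserved but not yet allocated on disk) so that
 * concurrent callers cannot oversubscribe the free space.
 */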
int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
						s64 nblocks)
{
	if (ext4_has_free_blocks(sbi, nblocks)) {
		percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb), 1) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block(filesystem wide)
 * @count:		pointer to total number of blocks needed
 * @errp:               error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is returned via *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
		ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode, ar.len);
	}
	return ret;
}

/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
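	/*
	 * Debug variant: also count free blocks directly from the on-disk
	 * bitmaps and report any mismatch with the group descriptors.
	 */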
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_blks_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_blks_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n", ext4_free_blocks_count(es),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_blks_count(sb, gdp);
	}

	return desc_count;
#endif
}

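/* Return true iff a is a positive power of b (b, b*b, b*b*b, ...). */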
static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

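/*
 * With sparse_super, superblock/GDT backups live only in groups 0 and 1
 * and in groups whose number is a power of 3, 5 or 7.
 */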
static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
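	/*
	 * In a meta block group the descriptor block backups live in its
	 * first, second and last groups.
	 */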
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);

}