/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "group.h"
#include "mballoc.h"

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
33
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
34
{
D
Dave Kleikamp 已提交
35
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
36 37
	ext4_grpblk_t offset;

D
Dave Kleikamp 已提交
38
	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
A
Andrew Morton 已提交
39
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
40 41 42
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
D
Dave Kleikamp 已提交
43
		*blockgrpp = blocknr;
44 45 46

}

/* Return 1 if @block belongs to @block_group, 0 otherwise. */
static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t grp_of_block;

	/* Map the block number to its owning group and compare. */
	ext4_get_group_no_and_offset(sb, block, &grp_of_block, NULL);
	return (grp_of_block == block_group) ? 1 : 0;
}

/*
 * Count how many of this group's metadata blocks (its block bitmap,
 * inode bitmap and inode table) actually reside inside the group.
 * With FLEX_BG those blocks may be packed into a different group.
 */
static int ext4_group_used_meta_blocks(struct super_block *sb,
				ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	/* Assume both bitmaps and the whole inode table live here */
	int in_group = sbi->s_itb_per_group + 2;
	ext4_fsblk_t blk;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		struct buffer_head *bh;
		struct ext4_group_desc *gdp;

		gdp = ext4_get_group_desc(sb, block_group, &bh);

		if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
					 block_group))
			in_group--;

		if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
					 block_group))
			in_group--;

		for (blk = ext4_inode_table(sb, gdp);
		     blk < ext4_inode_table(sb, gdp) + sbi->s_itb_per_group;
		     blk++)
			if (!ext4_block_in_group(sb, blk, block_group))
				in_group--;
	}
	return in_group;
}
/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
91
		 ext4_group_t block_group, struct ext4_group_desc *gdp)
A
Andreas Dilger 已提交
92 93 94 95 96 97 98 99 100 101 102
{
	int bit, bit_max;
	unsigned free_blocks, group_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (bh) {
		J_ASSERT_BH(bh, buffer_locked(bh));

		/* If checksum is bad mark all blocks used to prevent allocation
		 * essentially implementing a per-group read-only flag. */
		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
103
			ext4_error(sb, __func__,
104
				  "Checksum bad for group %u", block_group);
105 106 107
			ext4_free_blks_set(sb, gdp, 0);
			ext4_free_inodes_set(sb, gdp, 0);
			ext4_itable_unused_set(sb, gdp, 0);
A
Andreas Dilger 已提交
108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125
			memset(bh->b_data, 0xff, sb->s_blocksize);
			return 0;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
	}

	/* Check for superblock and gdt backups in this group */
	bit_max = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (bit_max) {
			bit_max += ext4_bg_num_gdb(sb, block_group);
			bit_max +=
				le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
126
		bit_max += ext4_bg_num_gdb(sb, block_group);
A
Andreas Dilger 已提交
127 128 129 130 131 132 133 134 135 136
	}

	if (block_group == sbi->s_groups_count - 1) {
		/*
		 * Even though mke2fs always initialize first and last group
		 * if some other tool enabled the EXT4_BG_BLOCK_UNINIT we need
		 * to make sure we calculate the right free blocks
		 */
		group_blocks = ext4_blocks_count(sbi->s_es) -
			le32_to_cpu(sbi->s_es->s_first_data_block) -
137
			(EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count - 1));
A
Andreas Dilger 已提交
138 139 140 141 142 143 144
	} else {
		group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
	}

	free_blocks = group_blocks - bit_max;

	if (bh) {
145 146
		ext4_fsblk_t start, tmp;
		int flex_bg = 0;
147

A
Andreas Dilger 已提交
148 149 150
		for (bit = 0; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

151
		start = ext4_group_first_block_no(sb, block_group);
A
Andreas Dilger 已提交
152

153 154 155
		if (EXT4_HAS_INCOMPAT_FEATURE(sb,
					      EXT4_FEATURE_INCOMPAT_FLEX_BG))
			flex_bg = 1;
A
Andreas Dilger 已提交
156

157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172
		/* Set bits for block and inode bitmaps, and inode table */
		tmp = ext4_block_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!flex_bg ||
				ext4_block_in_group(sb, tmp, block_group))
				ext4_set_bit(tmp - start, bh->b_data);
		}
A
Andreas Dilger 已提交
173 174 175 176 177 178 179
		/*
		 * Also if the number of blocks within the group is
		 * less than the blocksize * 8 ( which is the size
		 * of bitmap ), set rest of the block bitmap to 1
		 */
		mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
	}
180
	return free_blocks - ext4_group_used_meta_blocks(sb, block_group);
A
Andreas Dilger 已提交
181 182 183
}


/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */


/* True iff block b falls within the len-block range starting at first */
#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)

/**
199
 * ext4_get_group_desc() -- load group descriptor from disk
200 201 202 203 204
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
205
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
206
					     ext4_group_t block_group,
207
					     struct buffer_head **bh)
208
{
209 210
	unsigned int group_desc;
	unsigned int offset;
211
	struct ext4_group_desc *desc;
212
	struct ext4_sb_info *sbi = EXT4_SB(sb);
213 214

	if (block_group >= sbi->s_groups_count) {
215 216
		ext4_error(sb, "ext4_get_group_desc",
			   "block_group >= groups_count - "
217
			   "block_group = %u, groups_count = %u",
218
			   block_group, sbi->s_groups_count);
219 220 221 222 223

		return NULL;
	}
	smp_rmb();

224 225
	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
226
	if (!sbi->s_group_desc[group_desc]) {
227 228
		ext4_error(sb, "ext4_get_group_desc",
			   "Group descriptor not loaded - "
229
			   "block_group = %u, group_desc = %u, desc = %u",
230
			   block_group, group_desc, offset);
231 232 233
		return NULL;
	}

234 235 236
	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
237 238
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
239
	return desc;
240 241
}

/*
 * Sanity-check a freshly read block bitmap: the blocks holding the
 * group's own metadata (block bitmap, inode bitmap, inode table) must
 * all be marked in use.  Returns 1 if the bitmap looks valid, 0 (after
 * reporting an error) otherwise.
 */
static int ext4_valid_block_bitmap(struct super_block *sb,
					struct ext4_group_desc *desc,
					unsigned int block_group,
					struct buffer_head *bh)
{
	ext4_fsblk_t group_first_block;
	ext4_fsblk_t bitmap_blk;
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* With FLEX_BG the inode/block bitmaps and the inode table
		 * may not be in this group at all, so the check would have
		 * to read the group that actually holds them.  Skip the
		 * validation for such filesystems instead. */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* The block bitmap's own block must be marked in use */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		goto bad_bitmap;

	/* So must the inode bitmap's block */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		goto bad_bitmap;

	/* And the entire inode table run: no zero bit may occur inside it
	 * (bitmap_blk is kept pointing at the table for the error path) */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		return 1;

bad_bitmap:
	ext4_error(sb, __func__,
			"Invalid block bitmap - "
			"block_group = %d, block = %llu",
			block_group, bitmap_blk);
	return 0;
}
/**
295
 * ext4_read_block_bitmap()
296 297 298
 * @sb:			super block
 * @block_group:	given block group
 *
299 300
 * Read the bitmap for a given block_group,and validate the
 * bits for block/inode/inode tables are set in the bitmaps
301 302 303
 *
 * Return buffer_head on success or NULL in case of failure.
 */
A
Andreas Dilger 已提交
304
struct buffer_head *
305
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
306
{
307 308
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
309
	ext4_fsblk_t bitmap_blk;
310

A
Andreas Dilger 已提交
311
	desc = ext4_get_group_desc(sb, block_group, NULL);
312
	if (!desc)
313 314
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
315 316
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
317
		ext4_error(sb, __func__,
318
			    "Cannot read block bitmap - "
319
			    "block_group = %u, block_bitmap = %llu",
320
			    block_group, bitmap_blk);
321 322
		return NULL;
	}
323 324
	if (buffer_uptodate(bh) &&
	    !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))
325 326
		return bh;

327
	lock_buffer(bh);
328
	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
A
Andreas Dilger 已提交
329
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
330 331
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_buffer_uptodate(bh);
332
		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
A
Aneesh Kumar K.V 已提交
333
		unlock_buffer(bh);
334
		return bh;
A
Andreas Dilger 已提交
335
	}
336
	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
337 338
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
339
		ext4_error(sb, __func__,
340
			    "Cannot read block bitmap - "
341
			    "block_group = %u, block_bitmap = %llu",
342
			    block_group, bitmap_blk);
343 344
		return NULL;
	}
345 346 347 348 349
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * file system mounted not to panic on error,
	 * continue with corrupt bitmap
	 */
350 351 352 353
	return bh;
}

/**
354
 * ext4_add_groupblocks() -- Add given blocks to an existing group
355 356
 * @handle:			handle to this transaction
 * @sb:				super block
357
 * @block:			start physcial block to add to the block group
358
 * @count:			number of blocks to free
359
 *
360 361 362
 * This marks the blocks as free in the bitmap. We ask the
 * mballoc to reload the buddy after this by setting group
 * EXT4_GROUP_INFO_NEED_INIT_BIT flag
363
 */
364 365
void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count)
366 367 368
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
369
	ext4_group_t block_group;
370
	ext4_grpblk_t bit;
371
	unsigned int i;
372 373
	struct ext4_group_desc *desc;
	struct ext4_super_block *es;
374
	struct ext4_sb_info *sbi;
375
	int err = 0, ret, blk_free_count;
376 377
	ext4_grpblk_t blocks_freed;
	struct ext4_group_info *grp;
378

379
	sbi = EXT4_SB(sb);
380
	es = sbi->s_es;
381
	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
382

383
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
384
	grp = ext4_get_group_info(sb, block_group);
385 386 387 388
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
389
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
390
		goto error_return;
391
	}
392
	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
393 394
	if (!bitmap_bh)
		goto error_return;
395
	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
396 397 398
	if (!desc)
		goto error_return;

399 400 401 402
	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
403
		     sbi->s_itb_per_group)) {
404 405
		ext4_error(sb, __func__,
			   "Adding blocks in system zones - "
406 407
			   "Block = %llu, count = %lu",
			   block, count);
408 409
		goto error_return;
	}
410 411

	/*
412
	 * We are about to add blocks to the bitmap,
413 414 415
	 * so we need undo access.
	 */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
416
	err = ext4_journal_get_undo_access(handle, bitmap_bh);
417 418 419 420 421 422 423 424 425
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
426
	err = ext4_journal_get_write_access(handle, gd_bh);
427 428
	if (err)
		goto error_return;
429 430 431 432 433
	/*
	 * make sure we don't allow a parallel init on other groups in the
	 * same buddy cache
	 */
	down_write(&grp->alloc_sem);
434
	for (i = 0, blocks_freed = 0; i < count; i++) {
435
		BUFFER_TRACE(bitmap_bh, "clear bit");
436
		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
437
						bit + i, bitmap_bh->b_data)) {
438
			ext4_error(sb, __func__,
439
				   "bit already cleared for block %llu",
L
Laurent Vivier 已提交
440
				   (ext4_fsblk_t)(block + i));
441 442
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
443
			blocks_freed++;
444 445 446
		}
	}
	spin_lock(sb_bgl_lock(sbi, block_group));
447 448
	blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
	ext4_free_blks_set(sb, desc, blk_free_count);
A
Andreas Dilger 已提交
449
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
450
	spin_unlock(sb_bgl_lock(sbi, block_group));
451
	percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
452

453 454 455
	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		spin_lock(sb_bgl_lock(sbi, flex_group));
456
		sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
457 458
		spin_unlock(sb_bgl_lock(sbi, flex_group));
	}
459 460 461 462 463 464 465
	/*
	 * request to reload the buddy with the
	 * new bitmap information
	 */
	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
	ext4_mb_update_group_info(grp, blocks_freed);
	up_write(&grp->alloc_sem);
466

467 468
	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
469
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
470 471 472

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
473
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
474 475
	if (!err)
		err = ret;
476
	sb->s_dirt = 1;
477

478 479
error_return:
	brelse(bitmap_bh);
480
	ext4_std_error(sb, err);
481 482 483 484
	return;
}

/**
485
 * ext4_free_blocks() -- Free given blocks and update quota
486 487 488 489
 * @handle:		handle for this transaction
 * @inode:		inode
 * @block:		start physical block to free
 * @count:		number of blocks to count
490
 * @metadata: 		Are these metadata blocks
491
 */
492
void ext4_free_blocks(handle_t *handle, struct inode *inode,
493 494
			ext4_fsblk_t block, unsigned long count,
			int metadata)
495
{
496
	struct super_block *sb;
497 498
	unsigned long dquot_freed_blocks;

499 500
	/* this isn't the right place to decide whether block is metadata
	 * inode.c/extents.c knows better, but for safety ... */
501 502 503 504 505 506 507 508 509 510
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		metadata = 1;

	/* We need to make sure we don't reuse
	 * block released untill the transaction commit.
	 * writeback mode have weak data consistency so
	 * don't force data as metadata when freeing block
	 * for writeback mode.
	 */
	if (metadata == 0 && !ext4_should_writeback_data(inode))
511 512
		metadata = 1;

513
	sb = inode->i_sb;
514

515 516
	ext4_mb_free_blocks(handle, inode, block, count,
			    metadata, &dquot_freed_blocks);
517 518 519 520 521
	if (dquot_freed_blocks)
		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
	return;
}

/**
 * ext4_has_free_blocks()
 * @sbi:	in-core super block structure.
 * @nblocks:	number of needed blocks
 *
 * Check if filesystem has nblocks free & available for allocation.
 * On success return 1, return 0 on failure.
 */
int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
{
	struct percpu_counter *free_ctr = &sbi->s_freeblocks_counter;
	struct percpu_counter *dirty_ctr = &sbi->s_dirtyblocks_counter;
	s64 free_blocks, dirty_blocks, root_blocks;

	free_blocks = percpu_counter_read_positive(free_ctr);
	dirty_blocks = percpu_counter_read_positive(dirty_ctr);
	root_blocks = ext4_r_blocks_count(sbi->s_es);

	/*
	 * The cheap per-cpu reads can be stale; when the result lands
	 * near the watermark, fall back to the exact (slower) sums.
	 */
	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
						EXT4_FREEBLOCKS_WATERMARK) {
		free_blocks = percpu_counter_sum_positive(free_ctr);
		dirty_blocks = percpu_counter_sum_positive(dirty_ctr);
		/* defensive: _sum_positive should never go negative */
		if (dirty_blocks < 0) {
			printk(KERN_CRIT "Dirty block accounting "
					"went wrong %lld\n",
					(long long)dirty_blocks);
		}
	}
	/* Is there room once dirty and root-reserved blocks are counted? */
	if (free_blocks >= ((root_blocks + nblocks) + dirty_blocks))
		return 1;

	/* No.  May this caller dip into the root reserve? */
	if (sbi->s_resuid == current_fsuid() ||
	    ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE)) {
		if (free_blocks >= (nblocks + dirty_blocks))
			return 1;
	}

	return 0;
}

int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
A
Aneesh Kumar K.V 已提交
568
						s64 nblocks)
569
{
570 571
	if (ext4_has_free_blocks(sbi, nblocks)) {
		percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
572
		return 0;
573 574
	} else
		return -ENOSPC;
575
}
/**
578
 * ext4_should_retry_alloc()
579 580 581
 * @sb:			super block
 * @retries		number of attemps has been made
 *
582
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
583 584 585 586 587 588
 * it is profitable to retry the operation, this function will wait
 * for the current or commiting transaction to complete, and then
 * return TRUE.
 *
 * if the total number of retries exceed three times, return FALSE.
 */
589
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
590
{
591
	if (!ext4_has_free_blocks(EXT4_SB(sb), 1) || (*retries)++ > 3)
592 593 594 595
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

596
	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
597 598
}

/*
600
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
A
Aneesh Kumar K.V 已提交
601 602 603 604
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block(filesystem wide)
T
Theodore Ts'o 已提交
605
 * @count:		pointer to total number of blocks needed
A
Aneesh Kumar K.V 已提交
606 607
 * @errp:               error code
 *
T
Theodore Ts'o 已提交
608
 * Return 1st allocated block number on success, *count stores total account
609
 * error stores in errp pointer
A
Aneesh Kumar K.V 已提交
610
 */
611 612
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
		ext4_fsblk_t goal, unsigned long *count, int *errp)
A
Aneesh Kumar K.V 已提交
613
{
T
Theodore Ts'o 已提交
614
	struct ext4_allocation_request ar;
615
	ext4_fsblk_t ret;
T
Theodore Ts'o 已提交
616 617 618 619 620 621 622 623 624 625 626

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;

627 628 629
	/*
	 * Account for the allocated meta blocks
	 */
630
	if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
631
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
T
Theodore Ts'o 已提交
632
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
633 634 635
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	}
	return ret;
A
Aneesh Kumar K.V 已提交
636 637
}

/**
639
 * ext4_count_free_blocks() -- count filesystem free blocks
640 641 642 643
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
644
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
645
{
646 647
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
648 649
	ext4_group_t i;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
650 651 652
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
653
	unsigned int x;
654 655
	struct buffer_head *bitmap_bh = NULL;

656
	es = EXT4_SB(sb)->s_es;
657 658 659 660 661 662
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	smp_rmb();
	for (i = 0; i < ngroups; i++) {
663
		gdp = ext4_get_group_desc(sb, i, NULL);
664 665 666 667
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
668
		bitmap_bh = ext4_read_block_bitmap(sb, i);
669 670 671
		if (bitmap_bh == NULL)
			continue;

672
		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
673
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %u\n",
674 675 676 677
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
678 679 680
	printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n", ext4_free_blocks_count(es),
	       desc_count, bitmap_count);
681 682 683 684 685
	return bitmap_count;
#else
	desc_count = 0;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
686
		gdp = ext4_get_group_desc(sb, i, NULL);
687 688
		if (!gdp)
			continue;
689
		desc_count += ext4_free_blks_count(sb, gdp);
690 691 692 693 694 695
	}

	return desc_count;
#endif
}

/* Return true iff a is a positive integer power of b (b, b^2, b^3, ...). */
static inline int test_root(ext4_group_t a, int b)
{
	int power = b;

	/* Grow power until it reaches or passes a, then compare. */
	while (power < a)
		power *= b;
	return power == a;
}

/*
 * With sparse_super, backup superblocks live only in groups 0, 1 and
 * the groups whose number is a power of 3, 5 or 7.
 */
static int ext4_group_sparse(ext4_group_t group)
{
	/* groups 0 and 1 always carry a backup */
	if (group <= 1)
		return 1;
	/* powers of odd numbers are odd, so even groups never qualify */
	if ((group & 1) == 0)
		return 0;
	return test_root(group, 3) || test_root(group, 5) ||
	       test_root(group, 7);
}

/**
716
 *	ext4_bg_has_super - number of blocks used by the superblock in group
717 718 719 720 721 722
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
723
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
724
{
725 726 727
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
728 729 730 731
		return 0;
	return 1;
}

/*
 * With META_BG, only the first, second and last group of each
 * metagroup hold a copy of that metagroup's descriptor block.
 */
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t second = first + 1;
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	return (group == first || group == second || group == last) ? 1 : 0;
}

/*
 * Without META_BG, a group that holds a superblock backup also holds a
 * full copy of the group descriptor table; other groups hold none.
 */
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;
	return EXT4_SB(sb)->s_gdb_count;
}

/**
751
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
752 753 754 755 756 757 758
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
759
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
760 761
{
	unsigned long first_meta_bg =
762 763
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
764

765
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG) ||
766
			metagroup < first_meta_bg)
767
		return ext4_bg_num_gdb_nometa(sb, group);
768

769
	return ext4_bg_num_gdb_meta(sb,group);
770 771

}