/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
				   ext4_fsblk_t block)
{
	ext4_group_t group;

	if (test_opt2(sb, STD_GROUP_SIZE))
		group = (block -
			 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &group, NULL);
	return group;
}
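
/*
 * Worked example (hypothetical numbers): with 4KiB blocks and no
 * bigalloc (so EXT4_CLUSTER_BITS() == 0), the shift above is
 * 12 + 0 + 3 = 15, i.e. 32768 blocks per group; with
 * s_first_data_block == 0, block 100000 falls in group
 * 100000 >> 15 == 3.
 */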

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}
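
/*
 * Continuing the example above: for block 100000 with 32768 blocks per
 * group, do_div() leaves blocknr == 3 (the group) and returns the
 * remainder 1696, so *offsetp == 1696 at a cluster ratio of 1.
 */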

/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	actual_group = ext4_get_group_number(sb, block);
	return (actual_group == block_group) ? 1 : 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
					   ext4_group_t block_group,
					   struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}
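
/*
 * In the common contiguous layout the block bitmap, inode bitmap and
 * inode table directly follow the base metadata, so each of them either
 * already lies inside the first num_clusters clusters or bumps
 * num_clusters by one; the explicit block_cluster/inode_cluster/
 * itbl_cluster tracking above only fires for the unusual layouts
 * mentioned in the comment.
 */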

static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}
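
/*
 * Worked example (hypothetical geometry): with 32768 blocks per group
 * and ext4_blocks_count() == 100001, groups 0-2 are full and the last
 * group holds 100001 - 3 * 32768 = 1697 blocks, i.e. 1697 clusters at
 * a cluster ratio of 1.
 */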

/* Initializes an uninitialized block bitmap */
static int ext4_init_block_bitmap(struct super_block *sb,
				   struct buffer_head *bh,
				   ext4_group_t block_group,
				   struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;
	struct ext4_group_info *grp;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		grp = ext4_get_group_info(sb, block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
			int count;
			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
		return -EIO;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * If the number of blocks within the group is less than
	 * blocksize * 8 (the size of the bitmap), set the rest of
	 * the block bitmap to 1.
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	return 0;
}

/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    ext4_group_t block_group,
					    struct buffer_head *bh)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* With FLEX_BG, the inode/block bitmaps and the inode
		 * table may live outside the group entirely, so bitmap
		 * validation is skipped for such groups; verifying them
		 * would require also reading the block group where the
		 * bitmaps are actually located.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
			EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
			EXT4_B2C(sbi, offset));
	if (next_zero_bit <
	    EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group))
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

static void ext4_validate_block_bitmap(struct super_block *sb,
				       struct ext4_group_desc *desc,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (buffer_verified(bh))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
			desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode table blocks are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		int err;

		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		if (err)
			ext4_error(sb, "Checksum bad for grp %u", block_group);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit, an uptodate bh means
		 * the bitmap is uptodate too.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	if (buffer_verified(bh))
		return bh;
	put_bh(bh);
	return NULL;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	/* ...but check for error just in case errors=continue. */
	return !buffer_verified(bh);
}

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}
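
/*
 * Usage sketch (hypothetical caller): the nowait/wait split lets a
 * caller start reads for several bitmaps before blocking on any of
 * them, along these lines:
 *
 *	for (i = 0; i < n; i++)
 *		bh[i] = ext4_read_block_bitmap_nowait(sb, group + i);
 *	for (i = 0; i < n; i++)
 *		if (bh[i] && ext4_wait_block_bitmap(sb, group + i, bh[i]))
 *			(treat that group's bitmap as unreadable)
 *
 * ext4_read_block_bitmap() above is the single-group combination of
 * the two.
 */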

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	resv_clusters = atomic64_read(&sbi->s_resv_clusters);

	/*
	 * r_blocks_count should always be a multiple of the cluster ratio so
	 * we are safe to do a plain bit shift only.
	 */
	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
	      resv_clusters;

	if (free_clusters - (nclusters + rsv + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= (rsv + nclusters + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters +
				      resv_clusters))
			return 1;
	}
	/* No free blocks. Let's see if we can dip into reserved pool */
	if (flags & EXT4_MB_USE_RESERVED) {
		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block(filesystem wide)
 * @count:		pointer to total number of clusters needed
 * @errp:               error code
 *
 * Return the first allocated block number on success; *count returns the
 * total number of allocated blocks, and errors are returned in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}
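
/*
 * Usage sketch (hypothetical caller), asking for a single block near
 * @goal with no special flags:
 *
 *	unsigned long count = 1;
 *	int err;
 *	ext4_fsblk_t blk;
 *
 *	blk = ext4_new_meta_blocks(handle, inode, goal, 0, &count, &err);
 *	if (!blk)
 *		return err;
 */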

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (group == 0)
		return 1;
	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_SPARSE_SUPER2)) {
		if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
		    group == le32_to_cpu(es->s_backup_bgs[1]))
			return 1;
		return 0;
	}
	if ((group <= 1) || !EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER))
		return 1;
	if (!(group & 1))
		return 0;
	if (test_root(group, 3) || (test_root(group, 5)) ||
	    test_root(group, 7))
		return 1;

	return 0;
}
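
/*
 * Example: test_root(a, b) above returns 1 iff a is a power of b
 * (b, b^2, b^3, ...), so with sparse_super the superblock backups sit
 * in groups 0, 1 and the powers of 3, 5 and 7: 3, 5, 7, 9, 25, 27, 49,
 * 81, 125, 243, ...
 */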

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}

/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
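
/*
 * Worked example (hypothetical values): with flex_size == 16, a regular
 * file whose inode lives in group 5 allocates starting from group
 * (5 & ~15) + 1 == 1; without delalloc, contending processes are then
 * spread out by (current->pid % 16) * (EXT4_BLOCKS_PER_GROUP(sb) / 16)
 * blocks of "colour" past bg_start.
 */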