/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
				   ext4_fsblk_t block)
{
	ext4_group_t group;

	if (test_opt2(sb, STD_GROUP_SIZE))
		group = (block -
			 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &group, NULL);
	return group;
}

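/*
 * Worked example (hypothetical geometry): with 4 KiB blocks
 * (EXT4_BLOCK_SIZE_BITS == 12), a 1:1 block/cluster ratio
 * (EXT4_CLUSTER_BITS == 0) and s_first_data_block == 0, each group
 * covers 1 << (12 + 0 + 3) = 32768 blocks, so block 100000 falls in
 * group 100000 >> 15 = 3.
 */
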
/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;

}

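/*
 * Continuing the hypothetical example above: block 100000 is in group 3,
 * which starts at block 3 * 32768 = 98304, so the offset within the
 * group is 100000 - 98304 = 1696 (the cluster shift changes nothing at
 * a 1:1 block/cluster ratio).
 */
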
/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	actual_group = ext4_get_group_number(sb, block);
	return (actual_group == block_group) ? 1 : 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
					   ext4_group_t block_group,
					   struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

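/*
 * Worked example (hypothetical layout, no flex_bg, 1:1 cluster ratio):
 * a group starting with a superblock backup, 1 group descriptor block
 * and 256 reserved GDT blocks has 258 base metadata clusters; if the
 * block bitmap, inode bitmap and a 512-block inode table follow
 * contiguously, each lands exactly on the next cluster and simply
 * extends num_clusters, giving 258 + 1 + 1 + 512 = 772 overhead
 * clusters.
 */
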
static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

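/*
 * Worked example (hypothetical sizes): with 32768 blocks per group and
 * ext4_blocks_count() == 1000000, the last group (number 30) starts at
 * block 30 * 32768 = 983040 and so holds only 1000000 - 983040 = 16960
 * blocks instead of the usual 32768.
 */
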
/* Initializes an uninitialized block bitmap */
static void ext4_init_block_bitmap(struct super_block *sb,
				   struct buffer_head *bh,
				   ext4_group_t block_group,
				   struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;
	struct ext4_group_info *grp;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		grp = ext4_get_group_info(sb, block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
			int count;
			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * blocksize * 8 (which is the size of the bitmap), set the rest
	 * of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
}

/* Return the number of free blocks in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

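/*
 * Worked example (hypothetical geometry): with 4 KiB blocks and 32-byte
 * descriptors, EXT4_DESC_PER_BLOCK(sb) is 4096 / 32 = 128, so the
 * descriptor for group 300 lives in descriptor buffer 300 >> 7 = 2, at
 * byte offset (300 & 127) * 32 = 44 * 32 within that block.
 */
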
/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    ext4_group_t block_group,
					    struct buffer_head *bh)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* With FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all, so bitmap
		 * validation is skipped for those groups; verifying
		 * them would require also reading the block group
		 * where the bitmaps are actually located.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad inode bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
			EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
			EXT4_B2C(sbi, offset));
	if (next_zero_bit <
	    EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group))
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

static void ext4_validate_block_bitmap(struct super_block *sb,
				       struct ext4_group_desc *desc,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (buffer_verified(bh))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
			desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the
 * bits for the block/inode/inode table blocks are set in the bitmap
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	if (buffer_verified(bh))
		return bh;
	put_bh(bh);
	return NULL;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	/* ...but check for error just in case errors=continue. */
	return !buffer_verified(bh);
}

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}

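/*
 * Usage sketch (hypothetical caller, compiled out): the nowait/wait pair
 * above lets callers batch bitmap reads; most callers simply use the
 * combined helper and release the buffer when finished.
 */
#if 0
	struct buffer_head *bh = ext4_read_block_bitmap(sb, block_group);

	if (!bh)
		return -EIO;	/* read failure or invalid bitmap */
	/* ... examine bh->b_data ... */
	brelse(bh);
#endif
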
/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	resv_clusters = atomic64_read(&sbi->s_resv_clusters);

	/*
	 * r_blocks_count should always be a multiple of the cluster ratio,
	 * so we are safe to do a plain bit shift only.
	 */
	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
	      resv_clusters;

	if (free_clusters - (nclusters + rsv + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= (rsv + nclusters + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters +
				      resv_clusters))
			return 1;
	}
	/* No free blocks. Let's see if we can dip into reserved pool */
	if (flags & EXT4_MB_USE_RESERVED) {
		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

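/*
 * Note on the check above: percpu_counter_read_positive() is only an
 * approximation (each CPU may hold a small unsynced delta), so the
 * exact but more expensive percpu_counter_sum_positive() is used only
 * when the cheap estimate lands within EXT4_FREECLUSTERS_WATERMARK of
 * running out.
 */
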
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

/*
 *  ext4_new_meta_blocks() -- allocate metadata (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block (filesystem wide)
 * @flags:              flags from ext4_mb_new_blocks()
 * @count:		pointer to total number of clusters needed
 * @errp:               error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and any error is returned via *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

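/*
 * Usage sketch (hypothetical caller, compiled out): allocate a single
 * metadata block near @goal and propagate any error.
 */
#if 0
	unsigned long count = 1;
	int err;
	ext4_fsblk_t blk;

	blk = ext4_new_meta_blocks(handle, inode, goal, 0, &count, &err);
	if (!blk)
		return err;	/* e.g. -ENOSPC */
#endif
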
/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (group == 0)
		return 1;
	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_SPARSE_SUPER2)) {
		if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
		    group == le32_to_cpu(es->s_backup_bgs[1]))
			return 1;
		return 0;
	}
	if ((group <= 1) || !EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER))
		return 1;
	if (!(group & 1))
		return 0;
	if (test_root(group, 3) || (test_root(group, 5)) ||
	    test_root(group, 7))
		return 1;

	return 0;
}

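/*
 * Worked example: with SPARSE_SUPER set (and no SPARSE_SUPER2),
 * test_root() reports whether the group number is a power of 3, 5 or 7,
 * so superblock backups live only in groups 0, 1, 3, 5, 7, 9, 25, 27,
 * 49, 81, 125, 243, 343, ...
 */
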
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}
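
/*
 * Worked example (hypothetical geometry, no META_BG): a group carrying
 * a superblock backup, 13 group descriptor blocks and 256 reserved GDT
 * blocks contributes num = 1 + 13 + 256 = 270 blocks, i.e. 270 clusters
 * at a 1:1 block/cluster ratio.
 */
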
/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
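
/*
 * Worked example for the flex_bg branch above (hypothetical values):
 * with flex_size == 16 and an inode in block group 35, the goal group
 * is rounded down to 32 for directories and special files, or bumped to
 * 33 for regular files; without delayed allocation, the pid-based
 * colour then spreads goals across 16 slices of that group.
 */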