/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
				   ext4_fsblk_t block)
{
	ext4_group_t group;

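	/*
	 * With the standard group size, clusters-per-group is 8 * block
	 * size, so the group number is a plain shift: e.g. 4KiB blocks
	 * with no bigalloc (cluster bits == 0) give a shift of
	 * 12 + 0 + 3 = 15, i.e. 32768 blocks per group.
	 */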
	if (test_opt2(sb, STD_GROUP_SIZE))
		group = (block -
			 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &group, NULL);
	return group;
}

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
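	/*
	 * do_div() stores the quotient back in blocknr and returns the
	 * remainder, so blocknr now holds the group number and offset
	 * holds the remainder in cluster units.
	 */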
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;

}

/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	actual_group = ext4_get_group_number(sb, block);
	return (actual_group == block_group) ? 1 : 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
					   ext4_group_t block_group,
					   struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

/* Initializes an uninitialized block bitmap */
static void ext4_init_block_bitmap(struct super_block *sb,
				   struct buffer_head *bh,
				   ext4_group_t block_group,
				   struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;
	struct ext4_group_info *grp;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad, mark all blocks used to prevent allocation;
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		grp = ext4_get_group_info(sb, block_group);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;
	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also if the number of blocks within the group is less than
	 * the blocksize * 8 ( which is the size of bitmap ), set rest
	 * of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
}

/* Return the number of free clusters in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

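	/*
	 * Descriptors are packed EXT4_DESC_PER_BLOCK() to a block:
	 * group_desc picks the descriptor block, offset the entry
	 * within it (e.g. 4KiB blocks with 32-byte descriptors hold
	 * 128 descriptors per block).
	 */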
	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    ext4_group_t block_group,
					    struct buffer_head *bh)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all, so the bitmap
		 * validation will be skipped for those groups, or it
		 * would have to also read the block group where the
		 * bitmaps are located to verify they are set.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
			EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
			EXT4_B2C(sbi, offset));
	if (next_zero_bit <
	    EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group))
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

static void ext4_validate_block_bitmap(struct super_block *sb,
				       struct ext4_group_desc *desc,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);

	if (buffer_verified(bh))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
			desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the
 * bits for the block/inode bitmaps and inode table are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
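	/*
	 * If this group's bitmap was never written out (BLOCK_UNINIT),
	 * construct it in memory instead of reading it from disk.
	 */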
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit, and bh is uptodate, the
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	if (buffer_verified(bh))
		return bh;
	put_bh(bh);
	return NULL;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

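	/*
	 * ext4_read_block_bitmap_nowait() marks the buffer new before
	 * submitting a read; if buffer_new is clear, no I/O was started
	 * and the bitmap is already valid.
	 */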
	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	/* ...but check for error just in case errors=continue. */
	return !buffer_verified(bh);
}

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	resv_clusters = atomic64_read(&sbi->s_resv_clusters);

	/*
	 * r_blocks_count should always be a multiple of the cluster ratio,
	 * so we are safe to do a plain bit shift only.
	 */
	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
	      resv_clusters;

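	/*
	 * percpu_counter_read_positive() is only an approximation; when
	 * the cheap estimate comes close to the watermark, fall back to
	 * the exact (but more expensive) percpu_counter_sum_positive().
	 */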
	if (free_clusters - (nclusters + rsv + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= (rsv + nclusters + dirty_clusters))
		return 1;
	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
		if (free_clusters >= (nclusters + dirty_clusters +
				      resv_clusters))
			return 1;
	}
	/* No free blocks. Let's see if we can dip into reserved pool */
	if (flags & EXT4_MB_USE_RESERVED) {
		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block (filesystem wide)
 * @count:		pointer to total number of clusters needed
 * @errp:               error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated and the error code is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

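/*
 * Return 1 if 'a' is an integer power of 'b', else 0.  Used to decide
 * which groups carry sparse_super backups (powers of 3, 5 and 7).
 */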
static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (group == 0)
		return 1;
	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_SPARSE_SUPER2)) {
		if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
		    group == le32_to_cpu(es->s_backup_bgs[1]))
			return 1;
		return 0;
	}
	if ((group <= 1) || !EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER))
		return 1;
	if (!(group & 1))
		return 0;
	if (test_root(group, 3) || (test_root(group, 5)) ||
	    test_root(group, 7))
		return 1;

	return 0;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

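	/*
	 * With META_BG, each metagroup keeps its descriptor block in its
	 * first group, with backups in the second and last groups.
	 */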
	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}

/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

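	/*
	 * Otherwise spread allocations from different processes across
	 * the group: each pid maps to one of 16 "colour" slices.
	 */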
	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}