/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate block group number for a given block number
 */
ext4_group_t ext4_get_group_number(struct super_block *sb,
				   ext4_fsblk_t block)
{
	ext4_group_t group;

	if (test_opt2(sb, STD_GROUP_SIZE))
		group = (block -
			 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &group, NULL);
	return group;
}

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}

/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	actual_group = ext4_get_group_number(sb, block);
	return (actual_group == block_group) ? 1 : 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
				    ext4_group_t block_group,
				    struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

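/*
 * Return the number of clusters in the given block group; the last
 * group may be only partially filled, so its size is derived from the
 * total block count rather than EXT4_BLOCKS_PER_GROUP().
 */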
static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
			    ext4_group_t block_group,
			    struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;
	struct ext4_group_info *grp;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		grp = ext4_get_group_info(sb, block_group);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * blocksize * 8 (which is the size of the bitmap), set the rest
	 * of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
}

/* Return the number of free clusters in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */
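
/*
 * Illustrative sketch of a group that carries a superblock backup
 * (with flex_bg the bitmaps and inode table may live in another group):
 *
 *  +------------+-------------------+--------------+--------------+
 *  | superblock | group descriptors | block bitmap | inode bitmap |
 *  +------------+-------------------+--------------+--------------+
 *  | inode table (N blocks)         | data blocks ...             |
 *  +--------------------------------+-----------------------------+
 */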

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    ext4_group_t block_group,
					    struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all
		 * so the bitmap validation will be skipped for those groups
		 * or it has to also read the block group where the bitmaps
		 * are located to verify they are set.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

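/*
 * Validate the block bitmap for block_group; on failure mark the group
 * as having a corrupt block bitmap, on success mark the buffer verified.
 */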
void ext4_validate_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *desc,
			       ext4_group_t block_group,
			       struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);

	if (buffer_verified(bh))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
			desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the
 * bits for the block/inode/inode table blocks are set in the bitmap
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit and bh is uptodate,
		 * the bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	if (buffer_verified(bh))
		return bh;
	put_bh(bh);
	return NULL;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	/* ...but check for error just in case errors=continue. */
	return !buffer_verified(bh);
}

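/*
 * Read the block bitmap for block_group and wait for the I/O to
 * complete; returns NULL if the read or the validation fails.
 */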
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	resv_clusters = atomic64_read(&sbi->s_resv_clusters);

	/*
	 * r_blocks_count should always be a multiple of the cluster ratio so
	 * we are safe to do a plain bit shift only.
	 */
	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
	      resv_clusters;

	if (free_clusters - (nclusters + rsv + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= (rsv + nclusters + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters +
				      resv_clusters))
			return 1;
	}
	/* No free blocks. Let's see if we can dip into reserved pool */
	if (flags & EXT4_MB_USE_RESERVED) {
		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

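/*
 * Claim nclusters from the free pool: on success account them as dirty
 * (in-flight) clusters and return 0, otherwise return -ENOSPC.
 */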
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * if the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block (filesystem wide)
 * @count:		pointer to total number of clusters needed
 * @errp:               error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of allocated blocks, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

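/*
 * Return 1 if 'a' is a positive integer power of 'b', 0 otherwise.
 */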
static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

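/*
 * With sparse_super, superblock backups live only in groups 0, 1, and
 * powers of 3, 5, and 7.
 */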
static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

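/*
 * Number of group descriptor blocks carried by 'group' under META_BG:
 * only the first, second, and last groups of a metagroup keep a copy.
 */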
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

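/*
 * Number of group descriptor blocks carried by 'group' in the
 * traditional layout: groups with a superblock backup also hold a copy
 * of the descriptor table (capped at s_first_meta_bg blocks when the
 * META_BG feature is enabled).
 */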
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}
/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}