/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}
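
/*
 * Worked example (illustrative; assumes s_first_data_block == 0,
 * 32768 blocks per group and a 1:1 block/cluster ratio): block 100000
 * lies in group 3, at bitmap offset 100000 - 3 * 32768 = 1696:
 *
 *	ext4_group_t group;
 *	ext4_grpblk_t off;
 *
 *	ext4_get_group_no_and_offset(sb, 100000, &group, &off);
 *	// now group == 3 and off == 1696
 */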

static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;

	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
				    ext4_group_t block_group,
				    struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}
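
/*
 * Worked example (illustrative): with 100000 blocks total, 32768 blocks
 * per group and a 1:1 block/cluster ratio, the last group (group 3)
 * holds only 100000 - 3 * 32768 = 1696 blocks, so this returns 1696
 * instead of 32768.
 */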

/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
			    ext4_group_t block_group,
			    struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation,
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
		return;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * blocksize * 8 (which is the size of the bitmap), set the rest
	 * of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
}

/* Return the number of free clusters in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}
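
/*
 * Worked example (hedged; a typical layout, not a fixed rule): a group
 * of 32768 clusters whose overhead is 1 superblock backup + 2 group
 * descriptor blocks + 256 reserved GDT blocks + 1 block bitmap +
 * 1 inode bitmap + 512 inode table blocks = 773 clusters starts out
 * with 32768 - 773 = 31995 free clusters.
 */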

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}
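
/*
 * Usage sketch (illustrative): with 4KiB blocks and 32-byte descriptors
 * there are 128 descriptors per block, so group 300 lives in descriptor
 * block 2 at offset 44 (300 = 2 * 128 + 44):
 *
 *	struct buffer_head *gd_bh;
 *	struct ext4_group_desc *gdp;
 *
 *	gdp = ext4_get_group_desc(sb, 300, &gd_bh);
 *	if (!gdp)
 *		return;		// hypothetical caller just bails out
 *	// gd_bh is owned by the mounted superblock; no brelse() needed
 */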

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    unsigned int block_group,
					    struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all
		 * so the bitmap validation will be skipped for those groups
		 * or it has to also read the block group where the bitmaps
		 * are located to verify they are set.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

void ext4_validate_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *desc,
			       unsigned int block_group,
			       struct buffer_head *bh)
{
	ext4_fsblk_t	blk;

	if (buffer_verified(bh))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
			desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}


/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode table blocks are set in the bitmap
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * if not uninit if bh is uptodate,
		 * bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	return bh;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	return 0;
}

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}
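
/*
 * Usage sketch (illustrative): read one group's bitmap and count its
 * free bits, releasing the buffer when done:
 *
 *	struct buffer_head *bitmap_bh;
 *	unsigned int free;
 *
 *	bitmap_bh = ext4_read_block_bitmap(sb, group);
 *	if (!bitmap_bh)
 *		return;		// hypothetical caller just bails out
 *	free = ext4_count_free(bitmap_bh->b_data,
 *			       EXT4_BLOCKS_PER_GROUP(sb) / 8);
 *	brelse(bitmap_bh);
 */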


/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if filesystem has nclusters free & available for allocation.
 * On success return 1, return 0 on failure.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, root_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);

	/*
	 * r_blocks_count should always be a multiple of the cluster ratio,
	 * so it is safe to use a plain bit shift here.
	 */
	root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;

	if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= ((root_clusters + nclusters) + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}
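
/*
 * Caller pattern (hedged sketch): a reservation path typically claims
 * clusters up front and, if a later step fails, backs the claim out of
 * the dirty-clusters counter:
 *
 *	if (ext4_claim_free_clusters(sbi, 1, 0))
 *		return -ENOSPC;
 *	...
 *	// on failure of a later step, undo the claim:
 *	percpu_counter_sub(&sbi->s_dirtyclusters_counter, 1);
 */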

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * if the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
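
/*
 * Typical caller pattern (sketch; some_alloc_op() is hypothetical):
 * retry an operation that hit ENOSPC while a committing transaction
 * may still release blocks:
 *
 *	int retries = 0;
 *	int err;
 *
 *	do {
 *		err = some_alloc_op(handle, inode);
 *	} while (err == -ENOSPC &&
 *		 ext4_should_retry_alloc(inode->i_sb, &retries));
 */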

/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block(filesystem wide)
 * @flags:              flags from ext4_mb_new_blocks()
 * @count:		pointer to total number of clusters needed
 * @errp:               error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and the error code is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}
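
/*
 * Usage sketch (illustrative): allocate a single block for metadata,
 * e.g. an extent tree index block, near a goal block:
 *
 *	unsigned long count = 1;
 *	int err;
 *	ext4_fsblk_t newblock;
 *
 *	newblock = ext4_new_meta_blocks(handle, inode, goal, 0,
 *					&count, &err);
 *	if (!newblock)
 *		return err;	// hypothetical error handling
 */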

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_BLOCKS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}
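
/*
 * With sparse_super, only groups 0, 1 and powers of 3, 5 and 7 carry a
 * superblock backup, i.e. groups 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125,
 * 243, 343, 625, 729, ...; every other group returns 0 here.
 */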

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}
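
/*
 * Worked example (hedged; numbers depend on the filesystem geometry):
 * on a non-meta_bg filesystem with 2 group descriptor blocks and 256
 * reserved GDT blocks, group 3 (a sparse_super backup group) yields
 * EXT4_NUM_B2C(sbi, 1 + 2 + 256) clusters, while group 2, which carries
 * no backup, yields 0.
 */
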
/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
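
/*
 * Worked example (illustrative): without delayed allocation, a process
 * with pid 1234 allocating in a full group of 32768 blocks is offset by
 * colour = (1234 % 16) * (32768 / 16) = 2 * 2048 = 4096 blocks from the
 * start of the group, which spreads concurrent allocators apart.
 */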