/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}
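
/*
 * Worked example (illustrative, values assumed): with s_first_data_block
 * == 0, 32768 blocks per group and no bigalloc (s_cluster_bits == 0),
 * block 100000 lands in group 100000 / 32768 = 3 at offset
 * 100000 - 3 * 32768 = 1696:
 *
 *	ext4_group_t grp;
 *	ext4_grpblk_t off;
 *
 *	ext4_get_group_no_and_offset(sb, 100000, &grp, &off);
 *
 * yields grp == 3 and off == 1696.
 */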

/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	if (test_opt2(sb, STD_GROUP_SIZE))
		actual_group =
			(le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
			block) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	return (actual_group == block_group) ? 1 : 0;
}
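
/*
 * Illustrative note (not from the original sources): the STD_GROUP_SIZE
 * shortcut above works because a standard-sized group is described by
 * exactly one bitmap block of (blocksize * 8) bits, one bit per cluster,
 * so blocks per group == 1 << (EXT4_BLOCK_SIZE_BITS + EXT4_CLUSTER_BITS
 * + 3) and the group number falls out of a single shift.  E.g. with
 * 4KiB blocks and no bigalloc: 1 << (12 + 0 + 3) = 32768 blocks/group.
 */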

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
unsigned ext4_num_overhead_clusters(struct super_block *sb,
				    ext4_group_t block_group,
				    struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right free
		 * blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}
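
/*
 * Worked example (values assumed): with 32768 blocks per group and
 * ext4_blocks_count() == 100000, the last group (group 3) starts at
 * block 98304 and holds only 100000 - 98304 = 1696 blocks, so with a
 * 1:1 cluster ratio this returns 1696 rather than 32768.
 */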

/* Initializes an uninitialized block bitmap */
void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
			    ext4_group_t block_group,
			    struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		ext4_error(sb, "Checksum bad for group %u", block_group);
		ext4_free_group_clusters_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
		return;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also if the number of blocks within the group is less than
	 * the blocksize * 8 ( which is the size of bitmap ), set rest
	 * of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
}

/* Return the number of free clusters in a block group.  This is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}
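
/*
 * Usage sketch (hypothetical caller, error handling abbreviated):
 *
 *	struct buffer_head *gd_bh;
 *	struct ext4_group_desc *gdp;
 *	unsigned int free;
 *
 *	gdp = ext4_get_group_desc(sb, group, &gd_bh);
 *	if (gdp)
 *		free = ext4_free_group_clusters(sb, gdp);
 *
 * Note that no reference is taken on gd_bh; it points at a descriptor
 * block cached in sbi->s_group_desc for the lifetime of the mount.
 */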

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    unsigned int block_group,
					    struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* With FLEX_BG, the inode/block bitmaps and the inode
		 * table may not be in the group at all, so bitmap
		 * validation is skipped for those groups; verifying
		 * them would require also reading the block group
		 * where the bitmaps are actually located.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

void ext4_validate_block_bitmap(struct super_block *sb,
			       struct ext4_group_desc *desc,
			       unsigned int block_group,
			       struct buffer_head *bh)
{
	ext4_fsblk_t	blk;

	if (buffer_verified(bh))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
			desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the
 * bits for the block/inode bitmaps and inode table are set in the bitmap.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	return bh;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	return 0;
}

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}
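
/*
 * The _nowait/_wait pair above lets a caller pipeline bitmap reads,
 * e.g. (hypothetical caller, error handling elided) submitting several
 * reads before waiting on any of them:
 *
 *	for (i = 0; i < n; i++)
 *		bhs[i] = ext4_read_block_bitmap_nowait(sb, group + i);
 *	for (i = 0; i < n; i++)
 *		if (bhs[i] && ext4_wait_block_bitmap(sb, group + i, bhs[i]))
 *			brelse(bhs[i]);
 */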

/**
 * ext4_has_free_clusters()
 * @sbi:	in-core super block structure.
 * @nclusters:	number of needed clusters
 * @flags:	flags from ext4_mb_new_blocks()
 *
 * Check if the filesystem has nclusters free & available for allocation.
 * On success return 1, on failure return 0.
 */
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, root_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters  = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);

	/*
	 * r_blocks_count should always be a multiple of the cluster ratio,
	 * so we are safe to do a plain bit shift only.
	 */
	root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;

	if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters  = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= ((root_clusters + nclusters) + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
		(flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}
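
/*
 * Note (illustrative): percpu_counter_read_positive() can be off by up
 * to the per-CPU batching error, so the cheap reads above are trusted
 * only while the estimated headroom stays above
 * EXT4_FREECLUSTERS_WATERMARK; near the watermark the exact, more
 * expensive percpu_counter_sum_positive() is used before deciding.
 */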

int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
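
/*
 * Typical caller pattern (a sketch; the operation and local names are
 * assumed):
 *
 *	int retries = 0;
 *	int err;
 * retry:
 *	err = ext4_some_alloc_operation(handle, inode);
 *	if (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
 *		goto retry;
 */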

/*
 * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
 *
 * @handle:             handle to this transaction
 * @inode:              file inode
 * @goal:               given target block (filesystem wide)
 * @count:		pointer to total number of clusters needed
 * @errp:               error code
 *
 * Return the 1st allocated block number on success; *count stores the total
 * number of allocated blocks, and any error is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) &&
	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}
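
/*
 * Usage sketch (hypothetical caller): allocate a single metadata block
 * near "goal":
 *
 *	unsigned long count = 1;
 *	int err;
 *	ext4_fsblk_t blk;
 *
 *	blk = ext4_new_meta_blocks(handle, inode, goal, 0, &count, &err);
 *	if (!blk)
 *		return err;
 */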

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb:		superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_BLOCKS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}
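
/*
 * Worked example: test_root(49, 7) walks num = 7, 49 and returns 1
 * (49 == 7^2), while test_root(35, 7) walks num = 7, 49 and returns 0
 * because it overshoots 35.
 */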

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
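
/*
 * Only groups 0, 1 and the powers of 3, 5 and 7 are "sparse" here:
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, ...  With SPARSE_SUPER
 * enabled, all other groups carry no superblock/descriptor backups.
 */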

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
				     ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}
/**
 *	ext4_inode_to_goal_block - return a hint for block allocation
 *	@inode: inode for block allocation
 *
 *	Return the ideal location to start allocating blocks for a
 *	newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}