/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "group.h"

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * Calculate the block group number and offset, given a block number
 */
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;

}
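
/*
 * A worked example of the mapping above (hypothetical numbers): with
 * EXT4_BLOCKS_PER_GROUP(sb) == 32768 and s_first_data_block == 1,
 * block 100000 maps to:
 *
 *	blocknr = 100000 - 1 = 99999
 *	group   = 99999 / 32768 = 3
 *	offset  = 99999 % 32768 = 1695
 *
 * do_div() performs the division in place: the quotient is left in
 * blocknr and the remainder is returned.
 */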

static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
			ext4_group_t block_group)
{
	ext4_group_t actual_group;
	ext4_get_group_no_and_offset(sb, block, &actual_group, NULL);
	if (actual_group == block_group)
		return 1;
	return 0;
}

static int ext4_group_used_meta_blocks(struct super_block *sb,
				ext4_group_t block_group)
{
	ext4_fsblk_t tmp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	/* block bitmap, inode bitmap, and inode table blocks */
	int used_blocks = sbi->s_itb_per_group + 2;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		struct ext4_group_desc *gdp;
		struct buffer_head *bh;

		gdp = ext4_get_group_desc(sb, block_group, &bh);
		if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
					block_group))
			used_blocks--;

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!ext4_block_in_group(sb, tmp, block_group))
				used_blocks -= 1;
		}
	}
	return used_blocks;
}
/* Initializes an uninitialized block bitmap if given, and returns the
 * number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
		 ext4_group_t block_group, struct ext4_group_desc *gdp)
{
	int bit, bit_max;
	unsigned free_blocks, group_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (bh) {
		J_ASSERT_BH(bh, buffer_locked(bh));

		/* If checksum is bad mark all blocks used to prevent allocation
		 * essentially implementing a per-group read-only flag. */
		if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
			ext4_error(sb, __func__,
				  "Checksum bad for group %lu\n", block_group);
			gdp->bg_free_blocks_count = 0;
			gdp->bg_free_inodes_count = 0;
			gdp->bg_itable_unused = 0;
			memset(bh->b_data, 0xff, sb->s_blocksize);
			return 0;
		}
		memset(bh->b_data, 0, sb->s_blocksize);
	}

	/* Check for superblock and gdt backups in this group */
	bit_max = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (bit_max) {
			bit_max += ext4_bg_num_gdb(sb, block_group);
			bit_max +=
				le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		int group_rel = (block_group -
				 le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
				EXT4_DESC_PER_BLOCK(sb);
		if (group_rel == 0 || group_rel == 1 ||
		    (group_rel == EXT4_DESC_PER_BLOCK(sb) - 1))
			bit_max += 1;
	}

	if (block_group == sbi->s_groups_count - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, if some other tool enabled
		 * EXT4_BG_BLOCK_UNINIT we need to make sure we
		 * calculate the right number of free blocks
		 */
		group_blocks = ext4_blocks_count(sbi->s_es) -
			le32_to_cpu(sbi->s_es->s_first_data_block) -
			(EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count - 1));
	} else {
		group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
	}

	free_blocks = group_blocks - bit_max;

	if (bh) {
		ext4_fsblk_t start, tmp;
		int flex_bg = 0;

		for (bit = 0; bit < bit_max; bit++)
			ext4_set_bit(bit, bh->b_data);

		start = ext4_group_first_block_no(sb, block_group);

		if (EXT4_HAS_INCOMPAT_FEATURE(sb,
					      EXT4_FEATURE_INCOMPAT_FLEX_BG))
			flex_bg = 1;

		/* Set bits for block and inode bitmaps, and inode table */
		tmp = ext4_block_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_bitmap(sb, gdp);
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(tmp - start, bh->b_data);

		tmp = ext4_inode_table(sb, gdp);
		for (; tmp < ext4_inode_table(sb, gdp) +
				sbi->s_itb_per_group; tmp++) {
			if (!flex_bg ||
				ext4_block_in_group(sb, tmp, block_group))
				ext4_set_bit(tmp - start, bh->b_data);
		}
		/*
		 * Also, if the number of blocks within the group is less
		 * than the blocksize * 8 (which is the size of the
		 * bitmap), set the rest of the block bitmap to 1
		 */
		mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
	}
	return free_blocks - ext4_group_used_meta_blocks(sb, block_group);
}
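
/*
 * Sketch of the resulting on-disk bitmap for an ordinary group that
 * carries a superblock backup (non-FLEX_BG, non-META_BG; the exact
 * counts depend on the filesystem geometry):
 *
 *	bit 0				superblock backup
 *	bits 1..bit_max-1		group descriptors + reserved GDT blocks
 *	bit (block bitmap - start)	this group's block bitmap
 *	bit (inode bitmap - start)	this group's inode bitmap
 *	s_itb_per_group bits		this group's inode table
 *	remaining bits			0 (free data blocks)
 *	bits past group_blocks		padded with 1s by mark_bitmap_end()
 */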


/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */


#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb:			super block
 * @block_group:	given block group
 * @bh:			pointer to the buffer head to store the block
 *			group descriptor
 */
struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
					     ext4_group_t block_group,
					     struct buffer_head ** bh)
{
	unsigned long group_desc;
	unsigned long offset;
	struct ext4_group_desc * desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= sbi->s_groups_count) {
		ext4_error (sb, "ext4_get_group_desc",
			    "block_group >= groups_count - "
			    "block_group = %lu, groups_count = %lu",
			    block_group, sbi->s_groups_count);

		return NULL;
	}
	smp_rmb();

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error (sb, "ext4_get_group_desc",
			    "Group descriptor not loaded - "
			    "block_group = %lu, group_desc = %lu, desc = %lu",
			     block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}
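
/*
 * A typical caller pattern (a sketch, not a function in this file):
 *
 *	struct buffer_head *gd_bh;
 *	struct ext4_group_desc *desc;
 *
 *	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
 *	if (!desc)
 *		return;		(group out of range or descriptor not loaded)
 *	free = le16_to_cpu(desc->bg_free_blocks_count);
 *
 * The returned descriptor and *bh point into the s_group_desc[] buffers
 * that were read at mount time, so the caller does not brelse() them.
 */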

static int ext4_valid_block_bitmap(struct super_block *sb,
					struct ext4_group_desc *desc,
					unsigned int block_group,
					struct buffer_head *bh)
{
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t bitmap_blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* with FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all, so the
		 * bitmap validation is skipped for those groups;
		 * otherwise we would also have to read the block group
		 * where the bitmaps are located to verify they are set.
		 */
		return 1;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	bitmap_blk = ext4_block_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode bitmap block number is set */
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	offset = bitmap_blk - group_first_block;
	if (!ext4_test_bit(offset, bh->b_data))
		/* bad block bitmap */
		goto err_out;

	/* check whether the inode table block number is set */
	bitmap_blk = ext4_inode_table(sb, desc);
	offset = bitmap_blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
				offset + EXT4_SB(sb)->s_itb_per_group,
				offset);
	if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
		/* good bitmap for inode tables */
		return 1;

err_out:
	ext4_error(sb, __func__,
			"Invalid block bitmap - "
			"block_group = %d, block = %llu",
			block_group, bitmap_blk);
	return 0;
}
/**
 * ext4_read_block_bitmap()
 * @sb:			super block
 * @block_group:	given block group
 *
 * Read the bitmap for a given block_group, and validate that the
 * bits for the block/inode/inode table blocks are set in the bitmap
 *
 * Return buffer_head on success or NULL in case of failure.
 */
struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc * desc;
	struct buffer_head * bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, __func__,
			    "Cannot read block bitmap - "
			    "block_group = %d, block_bitmap = %llu",
			    (int)block_group, (unsigned long long)bitmap_blk);
		return NULL;
	}
	if (bh_uptodate_or_lock(bh))
		return bh;

	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, __func__,
			    "Cannot read block bitmap - "
			    "block_group = %d, block_bitmap = %llu",
			    (int)block_group, (unsigned long long)bitmap_blk);
		return NULL;
	}
	ext4_valid_block_bitmap(sb, desc, block_group, bh);
	/*
	 * file system was mounted not to panic on errors, so we
	 * continue with the corrupt bitmap
	 */
	return bh;
}
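
/*
 * A minimal usage sketch for ext4_read_block_bitmap(); the error path
 * mirrors what ext4_free_blocks_sb() below does:
 *
 *	struct buffer_head *bitmap_bh;
 *
 *	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
 *	if (!bitmap_bh)
 *		goto error_return;
 *	(test or modify bits in bitmap_bh->b_data)
 *	brelse(bitmap_bh);
 *
 * Unlike the group descriptor, the bitmap buffer holds a reference that
 * the caller must drop with brelse()/put_bh().
 */
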
/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * We use a red-black tree to represent per-filesystem reservation
 * windows.
 *
 */

/**
 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
 * @rb_root:		root of per-filesystem reservation rb tree
 * @verbose:		verbose mode
 * @fn:			function which wishes to dump the reservation map
 *
 * If verbose is turned on, it will print the whole block reservation
 * windows (start, end).  Otherwise, it will only print out the "bad" windows,
 * those windows that overlap with their immediate neighbors.
 */
#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
			      const char *fn)
{
	struct rb_node *n;
	struct ext4_reserve_window_node *rsv, *prev;
	int bad;

restart:
	n = rb_first(root);
	bad = 0;
	prev = NULL;

	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
	while (n) {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
		if (verbose)
			printk("reservation window 0x%p "
			       "start:  %llu, end:  %llu\n",
			       rsv, rsv->rsv_start, rsv->rsv_end);
		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
			printk("Bad reservation %p (start >= end)\n",
			       rsv);
			bad = 1;
		}
		if (prev && prev->rsv_end >= rsv->rsv_start) {
			printk("Bad reservation %p (prev->end >= start)\n",
			       rsv);
			bad = 1;
		}
		if (bad) {
			if (!verbose) {
				printk("Restarting reservation walk in verbose mode\n");
				verbose = 1;
				goto restart;
			}
		}
		n = rb_next(n);
		prev = rsv;
	}
	printk("Window map complete.\n");
	BUG_ON(bad);
}
#define rsv_window_dump(root, verbose) \
	__rsv_window_dump((root), (verbose), __func__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif

/**
 * goal_in_my_reservation()
 * @rsv:		inode's reservation window
 * @grp_goal:		given goal block relative to the allocation block group
 * @group:		the current allocation block group
 * @sb:			filesystem super block
 *
 * Test if the given goal block (group relative) is within the file's
 * own block reservation window range.
 *
 * If the reservation window is outside the goal allocation group, return 0;
 * grp_goal (given goal block) could be -1, which means no specific
 * goal block. In this case, always return 1.
 * If the goal block is within the reservation window, return 1;
 * otherwise, return 0;
 */
static int
goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
			ext4_group_t group, struct super_block *sb)
{
	ext4_fsblk_t group_first_block, group_last_block;

	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	if ((rsv->_rsv_start > group_last_block) ||
	    (rsv->_rsv_end < group_first_block))
		return 0;
	if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
		|| (grp_goal + group_first_block > rsv->_rsv_end)))
		return 0;
	return 1;
}

/**
 * search_reserve_window()
 * @rb_root:		root of reservation tree
 * @goal:		target allocation block
 *
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext4_reserve_window_node *
search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
{
	struct rb_node *n = root->rb_node;
	struct ext4_reserve_window_node *rsv;

	if (!n)
		return NULL;

	do {
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);

		if (goal < rsv->rsv_start)
			n = n->rb_left;
		else if (goal > rsv->rsv_end)
			n = n->rb_right;
		else
			return rsv;
	} while (n);
	/*
	 * We've fallen off the end of the tree: the goal wasn't inside
	 * any particular node.  OK, the previous node must be to one
	 * side of the interval containing the goal.  If it's the RHS,
	 * we need to back up one.
	 */
	if (rsv->rsv_start > goal) {
		n = rb_prev(&rsv->rsv_node);
		rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
	}
	return rsv;
}
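
/*
 * Illustration of the semantics above (window ranges are hypothetical):
 * with reserved windows [100, 119] and [140, 159] in the tree,
 *
 *	search_reserve_window(root, 110) returns [100, 119] (goal inside);
 *	search_reserve_window(root, 130) returns [100, 119] (the previous
 *		window, as the goal falls between the two windows);
 *	search_reserve_window(root, 90)  returns NULL (every window starts
 *		after the goal).
 */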

/**
 * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
 * @sb:			super block
 * @rsv:		reservation window to add
 *
 * Must be called with rsv_lock held.
 */
void ext4_rsv_window_add(struct super_block *sb,
		    struct ext4_reserve_window_node *rsv)
{
	struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
	struct rb_node *node = &rsv->rsv_node;
	ext4_fsblk_t start = rsv->rsv_start;

	struct rb_node ** p = &root->rb_node;
	struct rb_node * parent = NULL;
	struct ext4_reserve_window_node *this;

	while (*p) {
		parent = *p;
		this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);

		if (start < this->rsv_start)
			p = &(*p)->rb_left;
		else if (start > this->rsv_end)
			p = &(*p)->rb_right;
		else {
			rsv_window_dump(root, 1);
			BUG();
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
}

/**
 * ext4_rsv_window_remove() -- unlink a window from the reservation rb tree
 * @sb:			super block
 * @rsv:		reservation window to remove
 *
 * Mark the block reservation window as not allocated, and unlink it
 * from the filesystem reservation window rb tree. Must be called with
 * rsv_lock held.
 */
static void rsv_window_remove(struct super_block *sb,
			      struct ext4_reserve_window_node *rsv)
{
	rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
	rsv->rsv_alloc_hit = 0;
	rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
}

/*
 * rsv_is_empty() -- Check whether the reservation window is unallocated.
 * @rsv:		given reservation window to check
 *
 * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
 */
static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
{
	/* a valid reservation end block could not be 0 */
	return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
}

/**
 * ext4_init_block_alloc_info()
 * @inode:		file inode structure
 *
 * Allocate and initialize the reservation window structure, and
 * then link the window to the ext4 inode structure.
 *
 * The reservation window structure is only dynamically allocated
 * and linked to the ext4 inode the first time an open file
 * needs a new block. So, before every ext4_new_block(s) call, for
 * regular files, we should check whether the reservation window
 * structure exists or not. In the latter case, this function is called.
 * Failure to do so will result in block reservation being turned off
 * for that open file.
 *
 * This function is called from ext4_get_blocks_handle(); it is also
 * called when setting the reservation window size through ioctl before
 * the file is open for write (needs block allocation).
 *
 * Needs down_write(i_data_sem) protection prior to calling this function.
 */
void ext4_init_block_alloc_info(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct super_block *sb = inode->i_sb;

	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
	if (block_i) {
		struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;

		rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
		rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;

		/*
		 * if filesystem is mounted with NORESERVATION, the goal
		 * reservation window size is set to zero to indicate
		 * block reservation is off
		 */
		if (!test_opt(sb, RESERVATION))
			rsv->rsv_goal_size = 0;
		else
			rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
		rsv->rsv_alloc_hit = 0;
		block_i->last_alloc_logical_block = 0;
		block_i->last_alloc_physical_block = 0;
	}
	ei->i_block_alloc_info = block_i;
}

/**
 * ext4_discard_reservation()
 * @inode:		inode
 *
 * Discard (free) the block reservation window on last file close, on
 * truncate, or at the last iput().
 *
 * It is called in three cases:
 *	ext4_release_file(): when the last writer closes the file
 *	ext4_clear_inode(): at the last iput(), when nobody links to this file.
 *	ext4_truncate(): when the block indirect map is about to change.
 *
 */
void ext4_discard_reservation(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
	struct ext4_reserve_window_node *rsv;
	spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;

	ext4_mb_discard_inode_preallocations(inode);

	if (!block_i)
		return;

	rsv = &block_i->rsv_window_node;
	if (!rsv_is_empty(&rsv->rsv_window)) {
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&rsv->rsv_window))
			rsv_window_remove(inode->i_sb, rsv);
		spin_unlock(rsv_lock);
	}
}

/**
 * ext4_free_blocks_sb() -- Free given blocks and update quota
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physical block to free
 * @count:			number of blocks to free
 * @pdquot_freed_blocks:	pointer to quota
 */
void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count,
			 unsigned long *pdquot_freed_blocks)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	ext4_grpblk_t bit;
	unsigned long i;
	unsigned long overflow;
	struct ext4_group_desc * desc;
	struct ext4_super_block * es;
	struct ext4_sb_info *sbi;
	int err = 0, ret;
	ext4_grpblk_t group_freed;

	*pdquot_freed_blocks = 0;
	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (block < le32_to_cpu(es->s_first_data_block) ||
	    block + count < block ||
	    block + count > ext4_blocks_count(es)) {
		ext4_error (sb, "ext4_free_blocks",
			    "Freeing blocks not in datazone - "
			    "block = %llu, count = %lu", block, count);
		goto error_return;
	}

	ext4_debug ("freeing block(s) %llu-%llu\n", block, block + count - 1);

do_more:
	overflow = 0;
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	brelse(bitmap_bh);
	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext4_get_group_desc (sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group)) {
		ext4_error (sb, "ext4_free_blocks",
			    "Freeing blocks in system zones - "
			    "Block = %llu, count = %lu",
			    block, count);
		goto error_return;
	}

	/*
	 * We are about to start releasing blocks in the bitmap,
	 * so we need undo access.
	 */
	/* @@@ check errors */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;

	jbd_lock_bh_state(bitmap_bh);

	for (i = 0, group_freed = 0; i < count; i++) {
		/*
		 * An HJ special.  This is expensive...
		 */
#ifdef CONFIG_JBD2_DEBUG
		jbd_unlock_bh_state(bitmap_bh);
		{
			struct buffer_head *debug_bh;
			debug_bh = sb_find_get_block(sb, block + i);
			if (debug_bh) {
				BUFFER_TRACE(debug_bh, "Deleted!");
				if (!bh2jh(bitmap_bh)->b_committed_data)
					BUFFER_TRACE(debug_bh,
						"No committed data in bitmap");
				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
				__brelse(debug_bh);
			}
		}
		jbd_lock_bh_state(bitmap_bh);
#endif
		if (need_resched()) {
			jbd_unlock_bh_state(bitmap_bh);
			cond_resched();
			jbd_lock_bh_state(bitmap_bh);
		}
		/* @@@ This prevents newly-allocated data from being
		 * freed and then reallocated within the same
		 * transaction.
		 *
		 * Ideally we would want to allow that to happen, but to
		 * do so requires making jbd2_journal_forget() capable of
		 * revoking the queued write of a data block, which
		 * implies blocking on the journal lock.  *forget()
		 * cannot block due to truncate races.
		 *
		 * Eventually we can fix this by making jbd2_journal_forget()
		 * return a status indicating whether or not it was able
		 * to revoke the buffer.  On successful revoke, it is
		 * safe not to set the allocation bit in the committed
		 * bitmap, because we know that there is no outstanding
		 * activity on the buffer any more and so it is safe to
		 * reallocate it.
		 */
		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
		J_ASSERT_BH(bitmap_bh,
				bh2jh(bitmap_bh)->b_committed_data != NULL);
		ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
				bh2jh(bitmap_bh)->b_committed_data);

		/*
		 * We clear the bit in the bitmap after setting the committed
		 * data bit, because this is the reverse order to that which
		 * the allocator uses.
		 */
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
						bit + i, bitmap_bh->b_data)) {
			jbd_unlock_bh_state(bitmap_bh);
			ext4_error(sb, __func__,
				   "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			jbd_lock_bh_state(bitmap_bh);
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			group_freed++;
		}
	}
	jbd_unlock_bh_state(bitmap_bh);

	spin_lock(sb_bgl_lock(sbi, block_group));
	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_add(&sbi->s_freeblocks_counter, count);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		spin_lock(sb_bgl_lock(sbi, flex_group));
		sbi->s_flex_groups[flex_group].free_blocks += count;
		spin_unlock(sb_bgl_lock(sbi, flex_group));
	}

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_journal_dirty_metadata(handle, gd_bh);
	if (!err) err = ret;
	*pdquot_freed_blocks += group_freed;

	if (overflow && !err) {
		block += count;
		count = overflow;
		goto do_more;
	}
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}

/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:		handle for this transaction
 * @inode:		inode
 * @block:		start physical block to free
 * @count:		number of blocks to free
 * @metadata:		Are these metadata blocks?
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t block, unsigned long count,
			int metadata)
{
	struct super_block * sb;
	unsigned long dquot_freed_blocks;

	/* this isn't the right place to decide whether a block is metadata;
	 * inode.c/extents.c knows better, but for safety ... */
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
			ext4_should_journal_data(inode))
		metadata = 1;

	sb = inode->i_sb;

	if (!test_opt(sb, MBALLOC) || !EXT4_SB(sb)->s_group_info)
		ext4_free_blocks_sb(handle, sb, block, count,
						&dquot_freed_blocks);
	else
		ext4_mb_free_blocks(handle, inode, block, count,
						metadata, &dquot_freed_blocks);
	if (dquot_freed_blocks)
		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
	return;
}
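
/*
 * Example call (a sketch): freeing a single metadata block would look
 * like
 *
 *	ext4_free_blocks(handle, inode, block, 1, 1);
 *
 * Note that for directories, symlinks and data-journaled inodes the
 * function forces metadata = 1 itself, and the flag is only consumed
 * by the mballoc path (ext4_mb_free_blocks); the bitmap-based path
 * above does not use it.
 */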

/**
 * ext4_test_allocatable()
 * @nr:			given allocation block number (group relative)
 * @bh:			bufferhead contains the bitmap of the given block group
 *
 * For ext4 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
{
	int ret;
	struct journal_head *jh = bh2jh(bh);

	if (ext4_test_bit(nr, bh->b_data))
		return 0;

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data)
		ret = 1;
	else
		ret = !ext4_test_bit(nr, jh->b_committed_data);
	jbd_unlock_bh_state(bh);
	return ret;
}

/**
 * bitmap_search_next_usable_block()
 * @start:		the starting block (group relative) of the search
 * @bh:			bufferhead contains the block group bitmap
 * @maxblocks:		the ending block (group relative) of the reservation
 *
 * The bitmap search --- search forward alternately through the actual
 * bitmap on disk and the last-committed copy in journal, until we find a
 * bit free in both bitmaps.
 */
static ext4_grpblk_t
bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
					ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t next;
	struct journal_head *jh = bh2jh(bh);

	while (start < maxblocks) {
		next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
		if (next >= maxblocks)
			return -1;
		if (ext4_test_allocatable(next, bh))
			return next;
		jbd_lock_bh_state(bh);
		if (jh->b_committed_data)
			start = ext4_find_next_zero_bit(jh->b_committed_data,
							maxblocks, next);
		jbd_unlock_bh_state(bh);
	}
	return -1;
}

/**
 * find_next_usable_block()
 * @start:		the starting block (group relative) to find next
 *			allocatable block in bitmap.
 * @bh:			bufferhead contains the block group bitmap
 * @maxblocks:		the ending block (group relative) for the search
 *
 * Find an allocatable block in a bitmap.  We honor both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext4_grpblk_t
find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
			ext4_grpblk_t maxblocks)
{
	ext4_grpblk_t here, next;
	char *p, *r;

	if (start > 0) {
		/*
		 * The goal was occupied; search forward for a free
		 * block within the next XX blocks.
		 *
		 * end_goal is more or less random, but it has to be
		 * less than EXT4_BLOCKS_PER_GROUP. Aligning up to the
		 * next 64-bit boundary is simple.
		 */
		ext4_grpblk_t end_goal = (start + 63) & ~63;
		if (end_goal > maxblocks)
			end_goal = maxblocks;
		here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
		if (here < end_goal && ext4_test_allocatable(here, bh))
			return here;
		ext4_debug("Bit not found near goal\n");
	}

	here = start;
	if (here < 0)
		here = 0;

	p = ((char *)bh->b_data) + (here >> 3);
	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
	next = (r - ((char *)bh->b_data)) << 3;

	if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
		return next;

	/*
	 * The bitmap search --- search forward alternately through the actual
	 * bitmap and the last-committed copy until we find a bit free in
	 * both
	 */
	here = bitmap_search_next_usable_block(here, bh, maxblocks);
	return here;
}

/**
 * claim_block()
 * @block:		the free block (group relative) to allocate
 * @bh:			the bufferhead that contains the block group bitmap
 *
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);
	int ret;

	if (ext4_set_bit_atomic(lock, block, bh->b_data))
		return 0;
	jbd_lock_bh_state(bh);
	if (jh->b_committed_data && ext4_test_bit(block, jh->b_committed_data)) {
		ext4_clear_bit_atomic(lock, block, bh->b_data);
		ret = 0;
	} else {
		ret = 1;
	}
	jbd_unlock_bh_state(bh);
	return ret;
}

/**
 * ext4_try_to_allocate()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 *
 * Attempt to allocate blocks within a given range. Set the range of allocation
 * first, then find the first free bit(s) from the bitmap (within the range),
 * and at last, allocate the blocks by claiming the found free bit as allocated.
 *
 * To set the range of this allocation:
 *	if there is a reservation window, only try to allocate block(s) from the
 *	file's own reservation window;
 *	Otherwise, the allocation range starts from the given goal block, ends at
 *	the block group's last block.
 *
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext4_journal_release_buffer(), else we'll run out of credits.
 */
static ext4_grpblk_t
ext4_try_to_allocate(struct super_block *sb, handle_t *handle,
			ext4_group_t group, struct buffer_head *bitmap_bh,
			ext4_grpblk_t grp_goal, unsigned long *count,
			struct ext4_reserve_window *my_rsv)
{
	ext4_fsblk_t group_first_block;
	ext4_grpblk_t start, end;
	unsigned long num = 0;

	/* we do allocation within the reservation window if we have a window */
	if (my_rsv) {
		group_first_block = ext4_group_first_block_no(sb, group);
		if (my_rsv->_rsv_start >= group_first_block)
			start = my_rsv->_rsv_start - group_first_block;
		else
			/* reservation window cross group boundary */
			start = 0;
		end = my_rsv->_rsv_end - group_first_block + 1;
		if (end > EXT4_BLOCKS_PER_GROUP(sb))
			/* reservation window crosses group boundary */
			end = EXT4_BLOCKS_PER_GROUP(sb);
		if ((start <= grp_goal) && (grp_goal < end))
			start = grp_goal;
		else
			grp_goal = -1;
	} else {
		if (grp_goal > 0)
			start = grp_goal;
		else
			start = 0;
		end = EXT4_BLOCKS_PER_GROUP(sb);
	}

	BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));

repeat:
	if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
		grp_goal = find_next_usable_block(start, bitmap_bh, end);
		if (grp_goal < 0)
			goto fail_access;
		if (!my_rsv) {
			int i;

			for (i = 0; i < 7 && grp_goal > start &&
					ext4_test_allocatable(grp_goal - 1,
								bitmap_bh);
					i++, grp_goal--)
				;
		}
	}
	start = grp_goal;

	if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
		grp_goal, bitmap_bh)) {
		/*
		 * The block was allocated by another thread, or it was
		 * allocated and then freed by another thread
		 */
		start++;
		grp_goal++;
		if (start >= end)
			goto fail_access;
		goto repeat;
	}
	num++;
	grp_goal++;
	while (num < *count && grp_goal < end
		&& ext4_test_allocatable(grp_goal, bitmap_bh)
		&& claim_block(sb_bgl_lock(EXT4_SB(sb), group),
				grp_goal, bitmap_bh)) {
		num++;
		grp_goal++;
	}
	*count = num;
	return grp_goal - num;
fail_access:
	*count = num;
	return -1;
}

/**
 *	find_next_reservable_window():
 *		find a reservable space within the given range.
 *		It does not allocate the reservation window for now:
 *		alloc_new_reservation() will do the work later.
 *
 *	@search_head: the head of the searching list;
 *		This is not necessarily the list head of the whole filesystem
 *
 *		We have both head and start_block to assist the search
 *		for the reservable space. The list starts from head,
 *		but we will shift to the place where start_block is,
 *		then start from there, when looking for a reservable space.
 *
 *	@size: the target new reservation window size
 *
 *	@group_first_block: the first block we consider to start
 *			the real search from
 *
 *	@last_block:
 *		the maximum block number that our goal reservable space
 *		could start from. This is normally the last block in this
 *		group. The search will end when we found the start of next
 *		possible reservable space is out of this boundary.
 *		This could handle the cross boundary reservation window
 *		request.
 *
 *	basically we search from the given range, rather than the whole
 *	reservation double linked list, (start_block, last_block)
 *	to find a free region that is of my size and has not
 *	been reserved.
 *
 */
static int find_next_reservable_window(
				struct ext4_reserve_window_node *search_head,
				struct ext4_reserve_window_node *my_rsv,
				struct super_block * sb,
				ext4_fsblk_t start_block,
				ext4_fsblk_t last_block)
{
	struct rb_node *next;
	struct ext4_reserve_window_node *rsv, *prev;
	ext4_fsblk_t cur;
	int size = my_rsv->rsv_goal_size;

	/* TODO: make the start of the reservation window byte-aligned */
	/* cur = *start_block & ~7;*/
	cur = start_block;
	rsv = search_head;
	if (!rsv)
		return -1;

	while (1) {
		if (cur <= rsv->rsv_end)
			cur = rsv->rsv_end + 1;

		/* TODO?
		 * in the case we could not find a reservable space
		 * that is what is expected, during the re-search, we could
		 * remember what's the largest reservable space we could have
		 * and return that one.
		 *
		 * For now it will fail if we could not find the reservable
		 * space with expected-size (or more)...
		 */
		if (cur > last_block)
			return -1;		/* fail */

		prev = rsv;
		next = rb_next(&rsv->rsv_node);
		rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);

		/*
		 * Reached the last reservation, we can just append to the
		 * previous one.
		 */
		if (!next)
			break;

		if (cur + size <= rsv->rsv_start) {
			/*
			 * Found a reservable space big enough.  We could
			 * have a reservation across the group boundary here
			 */
			break;
		}
	}
	/*
	 * we come here either:
	 * when we reach the end of the whole list, and there is empty
	 * reservable space after the last entry in the list, in which
	 * case we append the new window to the end of the list;
	 *
	 * or we found one reservable space in the middle of the list,
	 * and we return the reservation window that we could append to.
	 * Either way we succeed.
	 */

	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
		rsv_window_remove(sb, my_rsv);

	/*
	 * Let's book the whole available window for now.  We will check the
	 * disk bitmap later and then, if there are free blocks then we adjust
	 * the window size if it's larger than requested.
	 * Otherwise, we will remove this node from the tree the next time
	 * find_next_reservable_window() is called.
	 */
	my_rsv->rsv_start = cur;
	my_rsv->rsv_end = cur + size - 1;
	my_rsv->rsv_alloc_hit = 0;

	if (prev != my_rsv)
		ext4_rsv_window_add(sb, my_rsv);

	return 0;
}

/**
 *	alloc_new_reservation()--allocate a new reservation window
 *
 *		To make a new reservation, we search part of the filesystem
 *		reservation list (the list that inside the group). We try to
 *		allocate a new reservation window near the allocation goal,
 *		or the beginning of the group, if there is no goal.
 *
 *		We first find a reservable space after the goal, then from
 *		there, we check the bitmap for the first free block after
 *		it. If there is no free block until the end of group, then the
 *		whole group is full, we failed. Otherwise, check if the free
 *		block is inside the expected reservable space, if so, we
 *		succeed.
 *		If the first free block is outside the reservable space, then
 *		start from the first free block, we search for next available
 *		space, and go on.
 *
 *	on success, a new reservation will be found and inserted into the list.
 *	It contains at least one free block, and it does not overlap with other
 *	reservation windows.
 *
 *	failed: we failed to find a reservation window in this group
 *
 *	@rsv: the reservation
 *
 *	@grp_goal: The goal (group-relative).  It is where the search for a
 *		free reservable space should start from.
 *		if we have a grp_goal (grp_goal > 0), then start from there;
 *		with no grp_goal (grp_goal = -1), we start from the first block
 *		of the group.
 *
 *	@sb: the super block
 *	@group: the group we are trying to allocate in
 *	@bitmap_bh: the block group block bitmap
 *
 */
static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
		ext4_grpblk_t grp_goal, struct super_block *sb,
		ext4_group_t group, struct buffer_head *bitmap_bh)
{
	struct ext4_reserve_window_node *search_head;
	ext4_fsblk_t group_first_block, group_end_block, start_block;
	ext4_grpblk_t first_free_block;
	struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
	unsigned long size;
	int ret;
	spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

	group_first_block = ext4_group_first_block_no(sb, group);
	group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	if (grp_goal < 0)
		start_block = group_first_block;
	else
		start_block = grp_goal + group_first_block;

	size = my_rsv->rsv_goal_size;

	if (!rsv_is_empty(&my_rsv->rsv_window)) {
		/*
		 * if the old reservation crosses the group boundary
		 * and if the goal is inside the old reservation window,
		 * we will come here when we just failed to allocate from
		 * the first part of the window. We still have another part
		 * that belongs to the next group. In this case, there is no
		 * point in discarding our window and trying to allocate a
		 * new one in this group (which will fail). We should
		 * keep the reservation window and simply move on.
		 *
		 * Maybe we could shift the start block of the reservation
		 * window to the first block of next group.
		 */

		if ((my_rsv->rsv_start <= group_end_block) &&
				(my_rsv->rsv_end > group_end_block) &&
				(start_block >= my_rsv->rsv_start))
			return -1;

		if ((my_rsv->rsv_alloc_hit >
		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
			/*
			 * if the previous allocation hit ratio is
			 * greater than 1/2, then we double the size of
			 * the reservation window the next time,
			 * otherwise we keep the same size window
			 */
			size = size * 2;
			if (size > EXT4_MAX_RESERVE_BLOCKS)
				size = EXT4_MAX_RESERVE_BLOCKS;
			my_rsv->rsv_goal_size = size;
		}
	}

	spin_lock(rsv_lock);
	/*
	 * shift the search start to the window near the goal block
	 */
	search_head = search_reserve_window(fs_rsv_root, start_block);

	/*
	 * find_next_reservable_window() simply finds a reservable window
	 * inside the given range(start_block, group_end_block).
	 *
	 * To make sure the reservation window has a free bit inside it, we
	 * need to check the bitmap after we found a reservable window.
	 */
retry:
	ret = find_next_reservable_window(search_head, my_rsv, sb,
						start_block, group_end_block);

	if (ret == -1) {
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;
	}

	/*
	 * On success, find_next_reservable_window() returns the
	 * reservation window where there is a reservable space after it.
	 * Before we reserve this reservable space, we need
	 * to make sure there is at least a free block inside this region.
	 *
	 * We search the first free bit on the block bitmap and the copy of
	 * the last committed bitmap alternately, until we find an
	 * allocatable block. The search starts from the start block of the
	 * reservable space we just found.
	 */
	spin_unlock(rsv_lock);
	first_free_block = bitmap_search_next_usable_block(
			my_rsv->rsv_start - group_first_block,
			bitmap_bh, group_end_block - group_first_block + 1);

	if (first_free_block < 0) {
		/*
		 * no free block left on the bitmap, no point
		 * to reserve the space. return failed.
		 */
		spin_lock(rsv_lock);
		if (!rsv_is_empty(&my_rsv->rsv_window))
			rsv_window_remove(sb, my_rsv);
		spin_unlock(rsv_lock);
		return -1;		/* failed */
	}

	start_block = first_free_block + group_first_block;
	/*
	 * check if the first free block is within the
	 * free space we just reserved
	 */
	if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
		return 0;		/* success */
	/*
	 * if the first free bit we found is out of the reservable space
	 * continue search for next reservable space,
	 * start from where the free block is,
	 * we also shift the list head to where we stopped last time
	 */
	search_head = my_rsv;
	spin_lock(rsv_lock);
	goto retry;
}

/**
 * try_to_extend_reservation()
 * @my_rsv:		given reservation window
 * @sb:			super block
 * @size:		the delta to extend
 *
 * Attempt to expand the reservation window large enough to have
 * required number of free blocks
 *
 * Since ext4_try_to_allocate() will always allocate blocks within
 * the reservation window range, if the window size is too small,
 * multiple blocks allocation has to stop at the end of the reservation
 * window. To make this more efficient, given the total number of
 * blocks needed and the current size of the window, we try to
 * expand the reservation window size if necessary on a best-effort
 * basis before ext4_new_blocks() tries to allocate blocks.
 */
static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
			struct super_block *sb, int size)
{
	struct ext4_reserve_window_node *next_rsv;
	struct rb_node *next;
	spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;

	if (!spin_trylock(rsv_lock))
		return;

	next = rb_next(&my_rsv->rsv_node);

	if (!next)
		my_rsv->rsv_end += size;
	else {
		next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);

		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
			my_rsv->rsv_end += size;
		else
			my_rsv->rsv_end = next_rsv->rsv_start - 1;
	}
	spin_unlock(rsv_lock);
}

/**
 * ext4_try_to_allocate_with_rsv()
 * @sb:			superblock
 * @handle:		handle to this transaction
 * @group:		given allocation block group
 * @bitmap_bh:		bufferhead holds the block bitmap
 * @grp_goal:		given target block within the group
 * @count:		target number of blocks to allocate
 * @my_rsv:		reservation window
 * @errp:		pointer to store the error code
 *
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, first try to allocate from
 * the file's own reservation.  If it does not have a reservation window,
 * then instead of looking for a free bit in the bitmap first and then
 * looking up the reservation list to see if it is inside somebody else's
 * reservation window, we try to allocate a reservation window for it
 * starting from the goal first, and then do the block allocation within
 * the reservation window.
 *
 * This will avoid keeping on searching the reservation list again and
 * again when somebody is looking for a free block (without
 * reservation), and there are lots of free blocks, but they are all
 * being reserved.
 *
 * We use a red-black tree for the per-filesystem reservation list.
 *
 */
static ext4_grpblk_t
ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
			ext4_group_t group, struct buffer_head *bitmap_bh,
			ext4_grpblk_t grp_goal,
			struct ext4_reserve_window_node * my_rsv,
			unsigned long *count, int *errp)
{
	ext4_fsblk_t group_first_block, group_last_block;
	ext4_grpblk_t ret = 0;
	int fatal;
	unsigned long num = *count;

	*errp = 0;

	/*
	 * Make sure we use undo access for the bitmap, because it is critical
	 * that we do the frozen_data COW on bitmap buffers in all cases even
	 * if the buffer is in BJ_Forget state in the committing transaction.
	 */
	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
	fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (fatal) {
		*errp = fatal;
		return -1;
	}

	/*
	 * we don't deal with reservations when
	 * the filesystem is mounted without reservations,
	 * or the file is not a regular file,
	 * or the last attempt to allocate a block with reservation
	 * turned on failed
	 */
	if (my_rsv == NULL) {
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
						grp_goal, count, NULL);
		goto out;
	}
	/*
	 * grp_goal is a group relative block number (if there is a goal)
	 * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
	 * first block is a filesystem wide block number
	 * first block is the block number of the first block in this group
	 */
	group_first_block = ext4_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);

	/*
	 * Basically we will allocate a new block from inode's reservation
	 * window.
	 *
	 * We need to allocate a new reservation window, if:
	 * a) inode does not have a reservation window; or
	 * b) last attempt to allocate a block from existing reservation
	 *    failed; or
	 * c) we come here with a goal and with a reservation window
	 *
	 * We do not need to allocate a new reservation window if we come here
	 * at the beginning with a goal and the goal is inside the window, or
	 * we don't have a goal but already have a reservation window.
	 * then we could go to allocate from the reservation window directly.
	 */
	while (1) {
		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
			!goal_in_my_reservation(&my_rsv->rsv_window,
						grp_goal, group, sb)) {
			if (my_rsv->rsv_goal_size < *count)
				my_rsv->rsv_goal_size = *count;
			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
							group, bitmap_bh);
			if (ret < 0)
				break;			/* failed */

			if (!goal_in_my_reservation(&my_rsv->rsv_window,
							grp_goal, group, sb))
				grp_goal = -1;
		} else if (grp_goal >= 0) {
			int curr = my_rsv->rsv_end -
					(grp_goal + group_first_block) + 1;

			if (curr < *count)
				try_to_extend_reservation(my_rsv, sb,
							*count - curr);
		}

		if ((my_rsv->rsv_start > group_last_block) ||
				(my_rsv->rsv_end < group_first_block)) {
			rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
			BUG();
		}
		ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
					   grp_goal, &num, &my_rsv->rsv_window);
		if (ret >= 0) {
			my_rsv->rsv_alloc_hit += num;
			*count = num;
			break;				/* succeed */
		}
		num = *count;
	}
out:
	if (ret >= 0) {
		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
					"bitmap block");
		fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
		if (fatal) {
			*errp = fatal;
			return -1;
		}
		return ret;
	}

	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
	ext4_journal_release_buffer(handle, bitmap_bh);
	return ret;
}

/**
 * ext4_has_free_blocks()
 * @sbi:		in-core super block structure.
 *
 * Check if filesystem has at least 1 free block available for allocation.
 */
static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t free_blocks, root_blocks;

	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	root_blocks = ext4_r_blocks_count(sbi->s_es);
	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
		sbi->s_resuid != current->fsuid &&
		(sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid))) {
		return 0;
	}
	return 1;
}
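
/*
 * Worked example (illustrative, not from the original source): with
 * s_r_blocks_count = 50, an unprivileged caller that is neither the
 * resuid user nor in the resgid group sees this return 0 once fewer
 * than 51 free blocks remain, while CAP_SYS_RESOURCE holders and the
 * reserved uid/gid can still allocate from the reserved pool.
 */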

/**
 * ext4_should_retry_alloc()
 * @sb:			super block
 * @retries:		number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned.  If it
 * is profitable to retry the operation, this function will wait for
 * the current or committing transaction to complete and then return
 * TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}
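
/*
 * Illustrative sketch (not part of the original file): callers
 * typically drive ext4_should_retry_alloc() in a loop, so that a
 * forced journal commit can free up blocks between attempts.  The
 * function name "example_alloc_with_retry" is hypothetical.
 */
#if 0	/* example only, kept out of the build */
static ext4_fsblk_t example_alloc_with_retry(handle_t *handle,
					     struct inode *inode,
					     ext4_fsblk_t goal, int *errp)
{
	ext4_fsblk_t block;
	int retries = 0;

	do {
		block = ext4_new_block(handle, inode, goal, errp);
	} while (*errp == -ENOSPC &&
		 ext4_should_retry_alloc(inode->i_sb, &retries));

	return block;
}
#endif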

/**
 * ext4_new_blocks_old() -- core block(s) allocation function
 * @handle:		handle to this transaction
 * @inode:		file inode
 * @goal:		given target block (filesystem wide)
 * @count:		target number of blocks to allocate
 * @errp:		error code
 *
 * ext4_new_blocks_old() uses a goal block to assist allocation.  It
 * first tries to allocate block(s) from the block group that contains
 * the goal block.  If that fails, it tries to allocate block(s) from
 * other block groups without any specific goal block.
 */
ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
			ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gdp_bh;
	ext4_group_t group_no;
	ext4_group_t goal_group;
	ext4_grpblk_t grp_target_blk;	/* blockgroup-relative goal block */
	ext4_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block */
	ext4_fsblk_t ret_block;		/* filesystem-wide allocated block */
	ext4_group_t bgi;			/* blockgroup iteration index */
	int fatal = 0, err;
	int performed_allocation = 0;
	ext4_grpblk_t free_blocks;	/* number of free blocks in a group */
	struct super_block *sb;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	struct ext4_reserve_window_node *my_rsv = NULL;
	struct ext4_block_alloc_info *block_i;
	unsigned short windowsz = 0;
	ext4_group_t ngroups;
	unsigned long num = *count;

	*errp = -ENOSPC;
	sb = inode->i_sb;
	if (!sb) {
		printk(KERN_ERR "ext4_new_block: nonexistent device\n");
		return 0;
	}

	/*
	 * Check quota for allocation of this block.
	 */
	if (DQUOT_ALLOC_BLOCK(inode, num)) {
		*errp = -EDQUOT;
		return 0;
	}

	sbi = EXT4_SB(sb);
	es = EXT4_SB(sb)->s_es;
	ext4_debug("goal=%llu.\n", goal);
	/*
	 * Allocate a block from the reservation only when the filesystem
	 * is mounted with reservation (the default, -o reservation), it's
	 * a regular file, and the desired window size is greater than 0
	 * (one can use the EXT4_IOC_SETRSVSZ ioctl to set the window size
	 * to 0 to turn off reservation for that particular file).
	 */
	block_i = EXT4_I(inode)->i_block_alloc_info;
	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
		my_rsv = &block_i->rsv_window_node;

	if (!ext4_has_free_blocks(sbi)) {
		*errp = -ENOSPC;
		goto out;
	}

	/*
	 * First, test whether the goal block is free.
	 */
	if (goal < le32_to_cpu(es->s_first_data_block) ||
	    goal >= ext4_blocks_count(es))
		goal = le32_to_cpu(es->s_first_data_block);
	ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
	goal_group = group_no;
retry_alloc:
	gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
	if (!gdp)
		goto io_error;

	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
	/*
	 * if there are not enough free blocks to make a new reservation,
	 * turn off reservation for this allocation
	 */
	if (my_rsv && (free_blocks < windowsz)
		&& (rsv_is_empty(&my_rsv->rsv_window)))
		my_rsv = NULL;

	if (free_blocks > 0) {
		bitmap_bh = ext4_read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, grp_target_blk,
					my_rsv,	&num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}

	ngroups = EXT4_SB(sb)->s_groups_count;
	smp_rmb();

	/*
	 * Now search the rest of the groups.  We assume that
	 * group_no and gdp correctly point to the last group visited.
	 */
	for (bgi = 0; bgi < ngroups; bgi++) {
		group_no++;
		if (group_no >= ngroups)
			group_no = 0;
		gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
		if (!gdp)
			goto io_error;
		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
		/*
		 * skip this group if the number of free blocks is no
		 * more than half of the reservation window size.
		 */
		if (free_blocks <= (windowsz/2))
			continue;

		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
		/*
		 * try to allocate block(s) from this group, without a goal(-1).
		 */
		grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
					group_no, bitmap_bh, -1, my_rsv,
					&num, &fatal);
		if (fatal)
			goto out;
		if (grp_alloc_blk >= 0)
			goto allocated;
	}
	/*
	 * We may end up with a bogus earlier ENOSPC error because the
	 * filesystem is "full" of reservations while free blocks may in
	 * fact still be available on disk.  In this case, just forget
	 * about the reservations and do the block allocation as if there
	 * were no reservations.
	 */
	if (my_rsv) {
		my_rsv = NULL;
		windowsz = 0;
		group_no = goal_group;
		goto retry_alloc;
	}
	/* No space left on the device */
	*errp = -ENOSPC;
	goto out;

allocated:

	ext4_debug("using block group %lu(%d)\n",
			group_no, gdp->bg_free_blocks_count);

	BUFFER_TRACE(gdp_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, gdp_bh);
	if (fatal)
		goto out;

	ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);

	if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
	    in_range(ret_block, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group) ||
	    in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group)) {
		ext4_error(sb, "ext4_new_block",
			    "Allocating block in system zone - "
			    "blocks from %llu, length %lu",
			     ret_block, num);
		/*
		 * claim_block marked the blocks we allocated
		 * as in use. So we may want to selectively
		 * mark some of the blocks as free
		 */
		goto retry_alloc;
	}

	performed_allocation = 1;

#ifdef CONFIG_JBD2_DEBUG
	{
		struct buffer_head *debug_bh;

		/* Record bitmap buffer state in the newly allocated block */
		debug_bh = sb_find_get_block(sb, ret_block);
		if (debug_bh) {
			BUFFER_TRACE(debug_bh, "state when allocated");
			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
			brelse(debug_bh);
		}
	}
	jbd_lock_bh_state(bitmap_bh);
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
		int i;

		for (i = 0; i < num; i++) {
			if (ext4_test_bit(grp_alloc_blk+i,
					bh2jh(bitmap_bh)->b_committed_data)) {
				printk("%s: block was unexpectedly set in "
					"b_committed_data\n", __func__);
			}
		}
	}
	ext4_debug("found bit %d\n", grp_alloc_blk);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	jbd_unlock_bh_state(bitmap_bh);
#endif

	if (ret_block + num - 1 >= ext4_blocks_count(es)) {
		ext4_error(sb, "ext4_new_block",
			    "block(%llu) >= blocks count(%llu) - "
			    "block_group = %lu, es == %p ", ret_block,
			ext4_blocks_count(es), group_no, es);
		goto out;
	}

	/*
	 * It is up to the caller to add the new buffer to a journal
	 * list of some description.  We don't know in advance whether
	 * the caller wants to use it as metadata or data.
	 */
	spin_lock(sb_bgl_lock(sbi, group_no));
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	percpu_counter_sub(&sbi->s_freeblocks_counter, num);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, group_no);
		spin_lock(sb_bgl_lock(sbi, flex_group));
		sbi->s_flex_groups[flex_group].free_blocks -= num;
		spin_unlock(sb_bgl_lock(sbi, flex_group));
	}

	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
	err = ext4_journal_dirty_metadata(handle, gdp_bh);
	if (!fatal)
		fatal = err;

	sb->s_dirt = 1;
	if (fatal)
		goto out;

	*errp = 0;
	brelse(bitmap_bh);
	DQUOT_FREE_BLOCK(inode, *count-num);
	*count = num;
	return ret_block;

io_error:
	*errp = -EIO;
out:
	if (fatal) {
		*errp = fatal;
		ext4_std_error(sb, fatal);
	}
	/*
	 * Undo the block allocation
	 */
	if (!performed_allocation)
		DQUOT_FREE_BLOCK(inode, *count);
	brelse(bitmap_bh);
	return 0;
}

ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
		ext4_fsblk_t goal, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	if (!test_opt(inode->i_sb, MBALLOC)) {
		unsigned long count = 1;
		ret = ext4_new_blocks_old(handle, inode, goal, &count, errp);
		return ret;
	}

	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = 1;
	ret = ext4_mb_new_blocks(handle, &ar, errp);
	return ret;
}

ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
		ext4_fsblk_t goal, unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	if (!test_opt(inode->i_sb, MBALLOC)) {
		ret = ext4_new_blocks_old(handle, inode, goal, count, errp);
		return ret;
	}

	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = *count;
	ret = ext4_mb_new_blocks(handle, &ar, errp);
	*count = ar.len;
	return ret;
}
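
/*
 * Illustrative sketch (not part of the original file): *count is an
 * in/out parameter -- the caller asks for a number of blocks and reads
 * back how many were actually allocated.  Names below are hypothetical.
 */
#if 0	/* example only, kept out of the build */
static void example_multi_block_alloc(handle_t *handle,
				      struct inode *inode, ext4_fsblk_t goal)
{
	unsigned long count = 8;	/* want up to 8 blocks */
	int err;
	ext4_fsblk_t first;

	first = ext4_new_blocks(handle, inode, goal, &count, &err);
	if (!first)
		return;		/* err holds -ENOSPC, -EDQUOT, -EIO, ... */
	/* blocks first .. first + count - 1 are now allocated */
}
#endif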

/**
 * ext4_count_free_blocks() -- count filesystem free blocks
 * @sb:		superblock
 *
 * Adds up the number of free blocks from each block group.
 */
ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned long x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh, sb->s_blocksize);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_blocks: stored = %llu"
		", computed = %llu, %llu\n",
		ext4_free_blocks_count(es),
		desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	smp_rmb();
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
	}

	return desc_count;
#endif
}

static inline int test_root(ext4_group_t a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int ext4_group_sparse(ext4_group_t group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;
	return (test_root(group, 7) || test_root(group, 5) ||
		test_root(group, 3));
}
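
/*
 * Worked example (illustrative): groups 0 and 1 always carry a backup;
 * beyond that, only odd groups that are powers of 3, 5 or 7 do, i.e.
 * groups 3, 5, 7, 9, 25, 27, 49, 81, ...  For instance test_root(9, 3)
 * steps num through 3, 9 and returns 1, while test_root(12, 3) steps
 * through 3, 9, 27 and returns 0.
 */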

/**
 *	ext4_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
			!ext4_group_sparse(group))
		return 0;
	return 1;
}

static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}
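
/*
 * Example (illustrative): assuming a 4K block size and 32-byte group
 * descriptors, EXT4_DESC_PER_BLOCK(sb) is 128, so metagroup 0 covers
 * groups 0..127 and its single descriptor block is replicated in
 * groups 0, 1 and 127.
 */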

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					ext4_group_t group)
{
	return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0;
}

/**
 *	ext4_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
			metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}
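
/*
 * Illustrative sketch (not part of the original file): the helpers
 * above combine to give the number of blocks a group spends on
 * superblock and descriptor-table backups.  The function name is
 * hypothetical, and reserved GDT blocks are deliberately ignored.
 */
#if 0	/* example only, kept out of the build */
static unsigned long example_group_meta_backups(struct super_block *sb,
						ext4_group_t group)
{
	unsigned long n = 0;

	if (ext4_bg_has_super(sb, group))
		n++;				/* superblock backup */
	n += ext4_bg_num_gdb(sb, group);	/* descriptor blocks */
	return n;
}
#endif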