/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/random.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "log.h"
#include "inode.h"
#include "trace_gfs2.h"

#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)

#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif

/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation.  Each block is represented by two
 * bits.  So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */

static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};
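
/*
 * Note added for clarity (not in the original source): the table above is
 * indexed as valid_change[new_state * 4 + cur_state], with the "n/e/w"
 * labels spelling "new" down the rows.  For example, freeing a used block
 * looks up valid_change[GFS2_BLKST_FREE * 4 + GFS2_BLKST_USED] == 1
 * (allowed), while "freeing" an already free block hits valid_change[0]
 * == 0 and is rejected by gfs2_setbit() below.
 */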

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
                         const struct gfs2_inode *ip, bool nowrap);


/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
 *
 */

static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	unsigned int buflen = rbm->bi->bi_len;
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
		       "new_state=%d\n", rbm->offset, cur_state, new_state);
		printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
		       (unsigned long long)rbm->rgd->rd_addr,
		       rbm->bi->bi_start);
		printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
		       rbm->bi->bi_offset, rbm->bi->bi_len);
		dump_stack();
		gfs2_consist_rgrpd(rbm->rgd);
		return;
	}
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (do_clone && rbm->bi->bi_clone) {
		byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}

/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rbm: The bit to test
 *
 * Returns: The two bit block state of the requested bit
 */

static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
{
	const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
	const u8 *byte;
	unsigned int bit;

	byte = buffer + (rbm->offset / GFS2_NBBY);
	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (*byte >> bit) & GFS2_BIT_MASK;
}

/**
 * gfs2_bit_search
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for; this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then and it with
 * the original. All the even bit positions (0,2,4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */

static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
{
	u64 tmp;
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	};
	tmp = le64_to_cpu(*ptr) ^ search[state];
	tmp &= (tmp >> 1);
	tmp &= mask;
	return tmp;
}
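
/*
 * Worked example, added for illustration only: consider a bitmap byte of
 * 0x64, i.e. blocks in states free, used, unlinked, used (entries 0..3).
 * Searching for GFS2_BLKST_USED xors with 0xaa.. giving 0xce; then
 * 0xce & (0xce >> 1) = 0x46, and masking with 0x55.. leaves 0x44, so bits
 * 2 and 6 flag entries 1 and 3 as matches and __ffs64() picks entry 1.
 */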

/**
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 *
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
 */
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
{
	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);

	if (blk >= startblk + rs->rs_free)
		return 1;
	if (blk + len - 1 < startblk)
		return -1;
	return 0;
}
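
/*
 * Example, added for illustration only: with a reservation starting at
 * block 105 holding rs_free = 20 blocks (105..124), a range blk = 100,
 * len = 10 overlaps (returns 0), blk = 90, len = 10 lies entirely before
 * it (returns -1), and blk = 125 lies entirely beyond it (returns 1).
 */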

/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *       a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem.  @buf will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
		       u32 goal, u8 state)
{
	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 tmp;
	u64 mask = 0x5555555555555555ULL;
	u32 bit;

	/* Mask off bits we don't care about at the start of the search */
	mask <<= spoint;
	tmp = gfs2_bit_search(ptr, mask, state);
	ptr++;
	while(tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
		ptr++;
	}
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	if (tmp == 0)
		return BFITNOENT;
	ptr--;
	bit = __ffs64(tmp);
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}

/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code
 */

static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
	u64 rblock = block - rbm->rgd->rd_data0;
	u32 x;

	if (WARN_ON_ONCE(rblock > UINT_MAX))
		return -EINVAL;
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
		return -E2BIG;

	rbm->bi = rbm->rgd->rd_bits;
	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first block */
	if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
		return 0;

	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
	x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->bi += x;
	return 0;
}
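
/*
 * Note added for clarity (not in the original source): the adjustment above
 * pretends that the header block carries only a gfs2_meta_header, so every
 * bitmap block then covers the same sd_blocks_per_bitmap blocks and the
 * target bitmap (bi) and the offset within it fall out of one division.
 */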

/**
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 *
 * Returns: true if a non-free block is encountered
 */

static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
	u64 block;
	u32 n;
	u8 res;

	for (n = 0; n < n_unaligned; n++) {
		res = gfs2_testbit(rbm);
		if (res != GFS2_BLKST_FREE)
			return true;
		(*len)--;
		if (*len == 0)
			return true;
		block = gfs2_rbm_to_block(rbm);
		if (gfs2_rbm_from_block(rbm, block + 1))
			return true;
	}

	return false;
}

/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */

static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;
	u32 size = len;
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;

	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		start = rbm.bi->bi_bh->b_data;
		if (rbm.bi->bi_clone)
			start = rbm.bi->bi_clone;
		end = start + rbm.bi->bi_bh->b_size;
		start += rbm.bi->bi_offset;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		gfs2_rbm_from_block(&rbm, block + chunk_size);
		n_unaligned = 3;
		if (ptr)
			break;
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;
}
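
/*
 * Example, added for illustration only (assuming the blocks are free and
 * lie within one bitmap): asking for up to 11 free blocks from an rbm
 * whose offset has (offset & 3) == 2 first checks 2 blocks bit by bit,
 * then scans the byte-aligned middle (two bitmap bytes, eight blocks)
 * with memchr_inv(), and finishes the one remaining block bit by bit.
 */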

/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
{
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
	u32 count = 0;

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}
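
/*
 * Example, added for illustration only: counting GFS2_BLKST_USED (1) in a
 * byte of 0x05 matches the low two entries ((0x05 & 0x03) == 0x01 and
 * (0x05 & 0x0C) == 0x04) but not the upper two, giving a count of 2.
 */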

/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch:  %u != %u\n",
			       count[0], rgd->rd_free);
		return;
	}

	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch:  %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch:  %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
		return;
	}
}

static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
{
	u64 first = rgd->rd_data0;
	u64 last = first + rgd->rd_data;
	return first <= block && block < last;
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			spin_unlock(&sdp->sd_rindex_spin);
			if (exact) {
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n = next;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);

	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}

void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
	int x;

	for (x = 0; x < rgd->rd_length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
	}
}

/**
 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
 * @ip: the inode for this reservation
 */
int gfs2_rs_alloc(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *res;

	if (ip->i_res)
		return 0;

	res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
	if (!res)
		return -ENOMEM;

	RB_CLEAR_NODE(&res->rs_node);

	down_write(&ip->i_rw_mutex);
	if (ip->i_res)
		kmem_cache_free(gfs2_rsrv_cachep, res);
	else
		ip->i_res = res;
	up_write(&ip->i_rw_mutex);
	return 0;
}

static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
	gfs2_print_dbg(seq, "  B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)rs->rs_inum,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}

/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	if (!gfs2_rs_active(rs))
		return;

	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);

	if (rs->rs_free) {
		/* return reserved blocks to the rgrp and the ip */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		rs->rs_free = 0;
		clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
		smp_mb__after_clear_bit();
	}
}

/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	rgd = rs->rs_rbm.rgd;
	if (rgd) {
		spin_lock(&rgd->rd_rsspin);
		__rs_deltree(ip, rs);
		spin_unlock(&rgd->rd_rsspin);
	}
}

/**
 * gfs2_rs_delete - delete a multi-block reservation
 * @ip: The inode for this reservation
 *
 */
void gfs2_rs_delete(struct gfs2_inode *ip)
{
	down_write(&ip->i_rw_mutex);
	if (ip->i_res) {
		gfs2_rs_deltree(ip, ip->i_res);
		BUG_ON(ip->i_res->rs_free);
		kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
		ip->i_res = NULL;
	}
	up_write(&ip->i_rw_mutex);
}

/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
	struct rb_node *n;
	struct gfs2_blkreserv *rs;

	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		__rs_deltree(NULL, rs);
	}
	spin_unlock(&rgd->rd_rsspin);
}

void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		gl = rgd->rd_gl;

		rb_erase(n, &sdp->sd_rindex_tree);

		if (gl) {
			spin_lock(&gl->gl_spin);
			gl->gl_object = NULL;
			spin_unlock(&gl->gl_spin);
			gfs2_glock_add_to_lru(gl);
			gfs2_glock_put(gl);
		}

		gfs2_free_clones(rgd);
		kfree(rgd->rd_bits);
		return_all_reservations(rgd);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}

static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	printk(KERN_INFO "  ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	printk(KERN_INFO "  ri_length = %u\n", rgd->rd_length);
	printk(KERN_INFO "  ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	printk(KERN_INFO "  ri_data = %u\n", rgd->rd_data);
	printk(KERN_INFO "  ri_bitbytes = %u\n", rgd->rd_bitbytes);
}

/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		bi->bi_flags = 0;
		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}
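
/*
 * Note added for clarity (not in the original source): the first bitmap
 * block shares space with the struct gfs2_rgrp header, every other bitmap
 * block only carries a struct gfs2_meta_header, and bi_start records how
 * many bitmap bytes precede each block, so the final check above can
 * confirm that the bitmaps cover exactly rd_data blocks.
 */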

/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;	
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	int error, rgrps;

	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
			break;
		error = gfs2_internal_read(ip, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	return total_data;
}

static int rgd_insert(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*newn) {
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
						  rd_node);

		parent = *newn;
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
	sdp->sd_rgrps++;
	return 0;
}

/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_gl->gl_object = rgd;
	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error)
		return 0;

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	do {
		error = read_rindex_entry(ip);
	} while (error == 0);

	if (error < 0)
		return error;

	sdp->sd_rindex_uptodate = 1;
	return 0;
}

/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int error = 0;
	int unlock_required = 0;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
				return error;
			unlock_required = 1;
		}
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
	}

	return error;
}

static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rgrp *str = buf;
	u32 rg_flags;

	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
}

static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrp *str = buf;

	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}

static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
		return 0;
	return 1;
}

static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
	rgl->__pad = 0UL;
}

static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
	rgl->rl_unlinked = cpu_to_be32(unlinked);
}

static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
	struct gfs2_bitmap *bi;
	const u32 length = rgd->rd_length;
	const u8 *buffer = NULL;
	u32 i, goal, count = 0;

	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
		goal = 0;
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
				break;
			count++;
			goal++;
		}
	}

	return count;
}


/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
	}
	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	}
	else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)){
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}

int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
	u32 rl_flags;

	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
		return 0;

	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);

	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
	return 0;
}

int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
		return 0;
	return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
}

/**
 * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @gh: The glock holder for the resource group
 *
 */

void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	int x, length = rgd->rd_length;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (bi->bi_bh) {
			brelse(bi->bi_bh);
			bi->bi_bh = NULL;
		}
	}

}

int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			     struct buffer_head *bh,
			     const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	struct block_device *bdev = sb->s_bdev;
	const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
					   bdev_logical_block_size(sb->s_bdev);
	u64 blk;
	sector_t start = 0;
	sector_t nr_sects = 0;
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
		clone += x;
		if (bh) {
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
		} else {
			diff = ~(*clone | (*clone >> 1));
		}
		diff &= 0x55;
		if (diff == 0)
			continue;
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		blk *= sects_per_blk; /* convert to sectors */
		while(diff) {
			if (diff & 1) {
				if (nr_sects == 0)
					goto start_new_extent;
				if ((start + nr_sects) != blk) {
					if (nr_sects >= minlen) {
						rv = blkdev_issue_discard(bdev,
							start, nr_sects,
							GFP_NOFS, 0);
						if (rv)
							goto fail;
						trimmed += nr_sects;
					}
					nr_sects = 0;
start_new_extent:
					start = blk;
				}
				nr_sects += sects_per_blk;
			}
			diff >>= 2;
			blk += sects_per_blk;
		}
	}
	if (nr_sects >= minlen) {
		rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
		if (rv)
			goto fail;
		trimmed += nr_sects;
	}
	if (ptrimmed)
		*ptrimmed = trimmed;
	return 0;

fail:
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
	sdp->sd_args.ar_discard = 0;
	return -EIO;
}

/**
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 *
 * Returns: 0 on success, otherwise error code
 */

int gfs2_fitrim(struct file *filp, void __user *argp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
	struct buffer_head *bh;
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd *rgd_end;
	struct gfs2_holder gh;
	struct fstrim_range r;
	int ret = 0;
	u64 amt;
	u64 trimmed = 0;
	u64 start, end, minlen;
	unsigned int x;
	unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&r, argp, sizeof(r)))
		return -EFAULT;

	ret = gfs2_rindex_update(sdp);
	if (ret)
		return ret;

	start = r.start >> bs_shift;
	end = start + (r.len >> bs_shift);
	minlen = max_t(u64, r.minlen,
		       q->limits.discard_granularity) >> bs_shift;

	rgd = gfs2_blk2rgrpd(sdp, start, 0);
	rgd_end = gfs2_blk2rgrpd(sdp, end - 1, 0);

	if (end <= start ||
	    minlen > sdp->sd_max_rg_data ||
	    start > rgd_end->rd_data0 + rgd_end->rd_data)
		return -EINVAL;

	while (1) {

		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			goto out;

		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
			/* Trim each bitmap in the rgrp */
			for (x = 0; x < rgd->rd_length; x++) {
				struct gfs2_bitmap *bi = rgd->rd_bits + x;
				ret = gfs2_rgrp_send_discards(sdp,
						rgd->rd_data0, NULL, bi, minlen,
						&amt);
				if (ret) {
					gfs2_glock_dq_uninit(&gh);
					goto out;
				}
				trimmed += amt;
			}

			/* Mark rgrp as having been trimmed */
			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
			if (ret == 0) {
				bh = rgd->rd_bits[0].bi_bh;
				rgd->rd_flags |= GFS2_RGF_TRIMMED;
				gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
				gfs2_rgrp_out(rgd, bh->b_data);
				gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
				gfs2_trans_end(sdp);
			}
		}
		gfs2_glock_dq_uninit(&gh);

		if (rgd == rgd_end)
			break;

		rgd = gfs2_rgrpd_get_next(rgd);
	}

out:
	r.len = trimmed << 9;
	if (copy_to_user(argp, &r, sizeof(r)))
		return -EFAULT;

	return ret;
}

/**
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
 *
 */
static void rs_insert(struct gfs2_inode *ip)
{
	struct rb_node **newn, *parent = NULL;
	int rc;
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);

	BUG_ON(gfs2_rs_active(rs));

	spin_lock(&rgd->rd_rsspin);
	newn = &rgd->rd_rstree.rb_node;
	while (*newn) {
		struct gfs2_blkreserv *cur =
			rb_entry(*newn, struct gfs2_blkreserv, rs_node);

		parent = *newn;
		rc = rs_cmp(fsblock, rs->rs_free, cur);
		if (rc > 0)
			newn = &((*newn)->rb_right);
		else if (rc < 0)
			newn = &((*newn)->rb_left);
		else {
			spin_unlock(&rgd->rd_rsspin);
			WARN_ON(1);
			return;
		}
	}

	rb_link_node(&rs->rs_node, parent, newn);
	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);

	/* Do our rgrp accounting for the reservation */
	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
	spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
}

/**
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @requested: number of blocks required for this allocation
 *
 */

static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
			   unsigned requested)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	u64 goal;
	struct gfs2_blkreserv *rs = ip->i_res;
	u32 extlen;
	u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
	int ret;

	extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
	extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
		return;

	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rgd->rd_last_alloc + rgd->rd_data0;

	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
		return;

	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, extlen, ip, true);
	if (ret == 0) {
		rs->rs_rbm = rbm;
		rs->rs_free = extlen;
		rs->rs_inum = ip->i_no_addr;
		rs_insert(ip);
	}
}

/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 */

static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      u32 length,
				      const struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs;
	struct rb_node *n;
	int rc;

	spin_lock(&rgd->rd_rsspin);
	n = rgd->rd_rstree.rb_node;
	while (n) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		if (rc < 0)
			n = n->rb_left;
		else if (rc > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n) {
		while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) {
			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
			n = n->rb_right;
			if (n == NULL)
				break;
			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		}
	}

	spin_unlock(&rgd->rd_rsspin);
	return block;
}
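
/*
 * Example, added for illustration only: if @block falls inside a
 * reservation covering blocks 4990..5009, the loop above advances to
 * 5010 (one past that reservation) and keeps walking right in case
 * 5010 itself starts another reservation owned by a different inode.
 */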

/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */

static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
					     u32 minext)
{
	u64 block = gfs2_rbm_to_block(rbm);
	u32 extlen = 1;
	u64 nblock;
	int ret;

	/*
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
	 */
	if (minext) {
		extlen = gfs2_free_extlen(rbm, minext);
		nblock = block + extlen;
		if (extlen < minext)
			goto fail;
	}

	/*
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	 */
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block)
		return 0;
fail:
	ret = gfs2_rbm_from_block(rbm, nblock);
	if (ret < 0)
		return ret;
	return 1;
}

/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: The requested extent length (0 for a single block)
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
			 const struct gfs2_inode *ip, bool nowrap)
{
	struct buffer_head *bh;
	struct gfs2_bitmap *initial_bi;
	u32 initial_offset;
	u32 offset;
	u8 *buffer;
	int index;
	int n = 0;
	int iters = rbm->rgd->rd_length;
	int ret;

	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	 */
	if (rbm->offset != 0)
		iters++;

	while(1) {
		if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = rbm->bi->bi_bh;
		buffer = bh->b_data + rbm->bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
			buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
			goto bitmap_full;
		rbm->offset = offset;
		if (ip == NULL)
			return 0;

		initial_bi = rbm->bi;
1582
		ret = gfs2_reservation_check_and_update(rbm, ip, minext);
1583 1584 1585 1586
		if (ret == 0)
			return 0;
		if (ret > 0) {
			n += (rbm->bi - initial_bi);
B
			goto next_iter;
		}
			index = 0;
			rbm->offset = 0;
			n += (rbm->bi - initial_bi);
			goto res_covered_end_of_rgrp;
		}
1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606
		return ret;

bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &rbm->bi->bi_flags);

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		index = rbm->bi - rbm->rgd->rd_bits;
		index++;
		if (index == rbm->rgd->rd_length)
			index = 0;
1607
res_covered_end_of_rgrp:
1608 1609 1610 1611
		rbm->bi = &rbm->rgd->rd_bits[index];
		if ((index == 0) && nowrap)
			break;
		n++;
B
next_iter:
			break;
	}

	return -ENOSPC;
}

/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Returns: 0 if no error
 *          The inode, if one has been found, in inode.
 */

static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };

	while (1) {
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, 0, NULL, true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}

/**
 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
 * @rgd: The rgrp in question
 * @loops: An indication of how picky we can be (0=very, 1=less so)
 *
 * This function uses the recently added glock statistics in order to
 * figure out whether a particular resource group is suffering from
 * contention from multiple nodes. This is done purely on the basis
 * of timings, since this is the only data we have to work with and
 * our aim here is to reject a resource group which is highly contended
 * but (very important) not to do this too often in order to ensure that
 * we do not land up introducing fragmentation by changing resource
 * groups when not actually required.
 *
 * The calculation is fairly simple, we want to know whether the SRTTB
 * (i.e. smoothed round trip time for blocking operations) to acquire
 * the lock for this rgrp's glock is significantly greater than the
 * time taken for resource groups on average. We introduce a margin in
 * the form of the variable @var which is computed as the sum of the two
 * respective variances, and multiplied by a factor depending on @loops
 * and whether we have a lot of data to base the decision on. This is
 * then tested against the square difference of the means in order to
 * decide whether the result is statistically significant or not.
 *
 * Returns: A boolean verdict on the congestion status
 */

static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
{
	const struct gfs2_glock *gl = rgd->rd_gl;
	const struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_lkstats *st;
	s64 r_dcount, l_dcount;
	s64 r_srttb, l_srttb;
	s64 srttb_diff;
	s64 sqr_diff;
	s64 var;

	preempt_disable();
	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
	r_srttb = st->stats[GFS2_LKS_SRTTB];
	r_dcount = st->stats[GFS2_LKS_DCOUNT];
	var = st->stats[GFS2_LKS_SRTTVARB] +
	      gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
	preempt_enable();

	l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
	l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];

	if ((l_dcount < 1) || (r_dcount < 1) || (r_srttb == 0))
		return false;

	srttb_diff = r_srttb - l_srttb;
	sqr_diff = srttb_diff * srttb_diff;

	var *= 2;
	if (l_dcount < 8 || r_dcount < 8)
		var *= 2;
	if (loops == 1)
		var *= 2;

	return ((srttb_diff < 0) && (sqr_diff > var));
}
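
/*
 * Illustrative example (editor's sketch, not part of the original code):
 * with hypothetical numbers r_srttb = 200 and l_srttb = 500, the difference
 * is srttb_diff = -300 and sqr_diff = 90000.  If the two variances sum to
 * 30000, the scaling above gives var = 60000 (or 120000/240000 when the
 * sample counts are small or @loops == 1), so the rgrp is reported as
 * congested only when 90000 > var, i.e. when the local round trip time is
 * worse than the filesystem-wide average by more than the noise margin.
 */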

/**
 * gfs2_rgrp_used_recently
 * @rs: The block reservation with the rgrp to test
 * @msecs: The time limit in milliseconds
 *
 * Returns: True if the rgrp glock has been used within the time limit
 */
static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
				    u64 msecs)
{
	u64 tdiff;

	tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
                            rs->rs_rbm.rgd->rd_gl->gl_dstamp));

	return tdiff > (msecs * 1000 * 1000);
}
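
/*
 * Editor's note (illustrative, not from the original source): @msecs is
 * converted to nanoseconds above (msecs * 1000 * 1000), so the
 * gfs2_rgrp_used_recently(rs, 1000) call in gfs2_inplace_reserve() uses a
 * one second window measured against the glock's gl_dstamp timestamp.
 */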

static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u32 skip;

	get_random_bytes(&skip, sizeof(skip));
	return skip % sdp->sd_rgrps;
}

static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
{
	struct gfs2_rgrpd *rgd = *pos;

	rgd = gfs2_rgrpd_get_next(rgd);
	if (rgd == NULL)
		rgd = gfs2_rgrpd_get_next(NULL);
	*pos = rgd;
	if (rgd != begin) /* If we didn't wrap */
		return true;
	return false;
}

/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @requested: the number of blocks to be reserved
 * @aflags: the allocation flags (e.g. GFS2_AF_ORLOV)
 *
 * Returns: errno
 */

int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = ip->i_res;
	int error = 0, rg_locked, flags = 0;
	u64 last_unlinked = NO_BLOCK;
	int loops = 0;
	u32 skip = 0;

	if (sdp->sd_args.ar_rgrplvb)
		flags |= GL_SKIP;
	if (gfs2_assert_warn(sdp, requested))
		return -EINVAL;
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
		flags = 0; /* Yoda: Do or do not. There is no try */
	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
		rs->rs_rbm.rgd = begin = ip->i_rgd;
	} else {
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	}
	if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
		skip = gfs2_orlov_skip(ip);
	if (rs->rs_rbm.rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 1;

		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 0;
			if (skip && skip--)
				goto next_rgrp;
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			     gfs2_rgrp_used_recently(rs, 1000) &&
			     gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
				goto next_rgrp;
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &rs->rs_rgd_gh);
			if (unlikely(error))
				return error;
			if (!gfs2_rs_active(rs) && (loops < 2) &&
			    gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
				goto skip_rgrp;
			if (sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (unlikely(error)) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
					return error;
				}
			}
		}

		/* Skip unusable resource groups */
		if (rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
			goto skip_rgrp;

		if (sdp->sd_args.ar_rgrplvb)
			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);

		/* Get a reservation if we don't already have one */
		if (!gfs2_rs_active(rs))
			rg_mblk_search(rs->rs_rbm.rgd, ip, requested);

		/* Skip rgrps when we can't get a reservation on first pass */
		if (!gfs2_rs_active(rs) && (loops < 1))
			goto check_rgrp;

		/* If rgrp has enough free space, use it */
		if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
			ip->i_rgd = rs->rs_rbm.rgd;
			return 0;
		}

		/* Drop reservation, if we couldn't use reserved rgrp */
		if (gfs2_rs_active(rs))
			gfs2_rs_deltree(ip, rs);
check_rgrp:
		/* Check for unlinked inodes which can be reclaimed */
		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
					ip->i_no_addr);
skip_rgrp:
		/* Unlock rgrp if required */
		if (!rg_locked)
			gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
next_rgrp:
		/* Find the next rgrp, and continue looking */
		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
			continue;
		if (skip)
			continue;

		/* If we've scanned all the rgrps, but found no free blocks
		 * then this checks for some less likely conditions before
		 * trying again.
		 */
		loops++;
		/* Check that fs hasn't grown if writing to rindex */
		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
			if (error)
				return error;
		}
		/* Flushing the log may release space */
		if (loops == 2)
			gfs2_log_flush(sdp, NULL);
	}

	return -ENOSPC;
}
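
/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): callers elsewhere in GFS2 pair a reservation with an allocation
 * and then release it, roughly along these lines (error handling and the
 * surrounding quota/transaction setup are simplified here):
 *
 *	error = gfs2_inplace_reserve(ip, requested, 0);
 *	if (error)
 *		return error;
 *	...start a transaction, then...
 *	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 *	...
 *	gfs2_inplace_release(ip);
 */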

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs = ip->i_res;

	if (rs->rs_rgd_gh.gh_gl)
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}

/**
 * gfs2_get_block_type - Check a block in a RG is of given type
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */

static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	int ret;

	ret = gfs2_rbm_from_block(&rbm, block);
	WARN_ON_ONCE(ret != 0);

	return gfs2_testbit(&rbm);
}

/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the resource group information
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length (value/result)
 *
 * Add the bitmap buffer to the transaction.
 * Set the found bits to GFS2_BLKST_DINODE (for the first block, if @dinode
 * is true) or GFS2_BLKST_USED to change the blocks' allocation state.
 */
static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
			     unsigned int *n)
{
	struct gfs2_rbm pos = { .rgd = rbm->rgd, };
	const unsigned int elen = *n;
	u64 block;
	int ret;

	*n = 1;
	block = gfs2_rbm_to_block(rbm);
	gfs2_trans_add_bh(rbm->rgd->rd_gl, rbm->bi->bi_bh, 1);
	gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	block++;
	while (*n < elen) {
		ret = gfs2_rbm_from_block(&pos, block);
		if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
			break;
		gfs2_trans_add_bh(pos.rgd->rd_gl, pos.bi->bi_bh, 1);
		gfs2_setbit(&pos, true, GFS2_BLKST_USED);
		(*n)++;
		block++;
	}
}
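
/*
 * Editor's note (illustrative, not from the original source): @n is a
 * value/result parameter.  A caller asking for up to 16 blocks might do,
 * roughly:
 *
 *	unsigned int n = 16;
 *
 *	gfs2_alloc_extent(&rbm, false, &n);
 *
 * On return, n holds how many contiguous blocks were actually marked in
 * the bitmap, starting at gfs2_rbm_to_block(&rbm).
 */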

/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns:  Resource group containing the block(s)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rbm rbm;

	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
	if (!rbm.rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	while (blen--) {
		gfs2_rbm_from_block(&rbm, bstart);
		bstart++;
		if (!rbm.bi->bi_clone) {
			rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
			       rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
			       rbm.bi->bi_len);
		}
		gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.bi->bi_bh, 1);
		gfs2_setbit(&rbm, false, new_state);
	}

	return rbm.rgd;
}

/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 *
 */

int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;

	if (rgd == NULL)
		return 0;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
		       rgd->rd_reserved);
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		dump_rs(seq, trs);
	}
	spin_unlock(&rgd->rd_rsspin);
	return 0;
}

static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}

/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
 */

static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			if (rs->rs_free && !ret)
				goto out;
		}
		__rs_deltree(ip, rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}
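
/*
 * Editor's worked example (illustrative, not from the original source): if
 * the reservation starts exactly at the allocated extent and rs_free is 10
 * while len is 3, then rlen = min(10, 3) = 3, rs_free drops to 7,
 * rd_reserved drops by 3 and rs_rbm is advanced to block + 3.  Only when
 * the reservation is exhausted, or no longer lines up with the allocation,
 * is it removed via __rs_deltree().
 */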

/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */

int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	unsigned int ndata;
	u64 goal;
	u64 block; /* block, within the file system scope */
	int error;

	if (gfs2_rs_active(ip->i_res))
		goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
	else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;

	gfs2_rbm_from_block(&rbm, goal);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);

	if (error == -ENOSPC) {
		gfs2_rbm_from_block(&rbm, goal);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
		goto rgrp_error;
	}

	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		printk(KERN_WARNING "nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, 1);

	/*
	 * This needs reviewing to see why we cannot do the quota change
	 * at this point in the dinode case.
	 */
	if (ndata)
		gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
				  ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
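
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * for a dinode the caller asks for a single block and receives the new
 * inode generation number as well, roughly:
 *
 *	u64 no_addr, generation;
 *	unsigned int n = 1;
 *	int error;
 *
 *	error = gfs2_alloc_blocks(ip, &no_addr, &n, 1, &generation);
 *
 * Ordinary data/metadata blocks are allocated with dinode == 0 and a NULL
 * @generation, with @nblocks as the requested/returned extent length.
 */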

/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 */

void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}

void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, 1);
}

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
}


void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}

/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */

int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	if (gfs2_get_block_type(rgd, no_addr) != type)
		error = -ESTALE;

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}
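
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a caller that needs to confirm a block still holds an on-disk inode
 * before instantiating it might do, roughly:
 *
 *	error = gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_DINODE);
 *	if (error == -ESTALE)
 *		...the block has since been freed or reused...
 *	else if (error)
 *		...locking or lookup failure...
 */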

/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
		rgd = ip->i_rgd;
	else
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
		return;
	}
	ip->i_rgd = rgd;

	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *      and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				state, 0,
				&rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @list: the list of resource groups
 *
 */

void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;
	}
}
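
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the rlist helpers above are typically used together by deallocation
 * paths that must hold every resource group touched by a run of blocks,
 * roughly:
 *
 *	struct gfs2_rgrp_list rlist;
 *	int error;
 *
 *	memset(&rlist, 0, sizeof(rlist));
 *	gfs2_rlist_add(ip, &rlist, block);
 *	...add any further blocks...
 *	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 *	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	if (!error) {
 *		...free the blocks while the rgrp glocks are held...
 *		gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	}
 *	gfs2_rlist_free(&rlist);
 */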