/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "log.h"
#include "inode.h"
#include "trace_gfs2.h"

#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)

#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif

/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation.  Each block is represented by two
 * bits.  So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */
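/*
 * Worked example (illustrative): the byte 0xE4 (binary 11 10 01 00) encodes
 * four blocks.  The lowest two bits describe the first block in the byte, so
 * block 0 is free (00), block 1 is used data (01), block 2 is an unlinked
 * inode (10) and block 3 is used metadata (11), matching the bit extraction
 * done by gfs2_testbit() below.
 */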

static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};
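/*
 * The table is indexed as valid_change[new_state * 4 + cur_state] (see
 * gfs2_setbit() below).  For example, FREE (0) -> USED (1) reads entry
 * 1 * 4 + 0 == 1 and is allowed, while USED (1) -> UNLINKED (2) reads entry
 * 2 * 4 + 1 == 0 and triggers a consistency error.
 */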

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
                         const struct gfs2_inode *ip, bool nowrap);


/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
 *
 */

static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	unsigned int buflen = rbm->bi->bi_len;
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
		       "new_state=%d\n", rbm->offset, cur_state, new_state);
		printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
		       (unsigned long long)rbm->rgd->rd_addr,
		       rbm->bi->bi_start);
		printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
		       rbm->bi->bi_offset, rbm->bi->bi_len);
		dump_stack();
		gfs2_consist_rgrpd(rbm->rgd);
		return;
	}
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (do_clone && rbm->bi->bi_clone) {
		byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}
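/*
 * Example of the XOR update above (illustrative): with an all-free byte
 * (0x00), setting the block at offset 2 within the byte to GFS2_BLKST_USED
 * uses bit = (2 % GFS2_NBBY) * GFS2_BIT_SIZE = 4, so *byte1 ^= (0 ^ 1) << 4,
 * leaving the byte as 0x10.
 */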

/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rbm: The bit to test
 *
 * Returns: The two bit block state of the requested bit
 */

static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
{
	const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
	const u8 *byte;
	unsigned int bit;

	byte = buffer + (rbm->offset / GFS2_NBBY);
	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (*byte >> bit) & GFS2_BIT_MASK;
}

/**
 * gfs2_bit_search
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for; this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0,2,4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */

static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
{
	u64 tmp;
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	};
	tmp = le64_to_cpu(*ptr) ^ search[state];
	tmp &= (tmp >> 1);
	tmp &= mask;
	return tmp;
}
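/*
 * Worked example of the search above (illustrative, one byte shown for
 * brevity): the byte 0xE4 holds blocks in states 0, 1, 2, 3.  Searching for
 * state 1 xors with 0xAA: 0xE4 ^ 0xAA = 0x4E; 0x4E & (0x4E >> 1) = 0x06;
 * masking with 0x55 leaves 0x04, i.e. bit 2 set, and __ffs64(tmp) / 2 = 1
 * correctly identifies block 1 when used by gfs2_bitfit() below.
 */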

/**
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 *
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
 */
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
{
	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);

	if (blk >= startblk + rs->rs_free)
		return 1;
	if (blk + len - 1 < startblk)
		return -1;
	return 0;
}
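/*
 * For example (illustrative values): a reservation starting at block 100
 * with rs_free = 10 covers blocks 100..109, so rs_cmp(110, 5, rs) returns 1,
 * rs_cmp(90, 5, rs) returns -1 (90..94 ends before 100), and rs_cmp(98, 5, rs)
 * returns 0 because 98..102 overlaps the reserved range.
 */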

/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *       a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buffer)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem.  @buffer will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
		       u32 goal, u8 state)
{
	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 tmp;
	u64 mask = 0x5555555555555555ULL;
	u32 bit;

	/* Mask off bits we don't care about at the start of the search */
	mask <<= spoint;
	tmp = gfs2_bit_search(ptr, mask, state);
	ptr++;
	while(tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
		ptr++;
	}
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	if (tmp == 0)
		return BFITNOENT;
	ptr--;
	bit = __ffs64(tmp);
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}
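/*
 * Note on the start-point masking above (illustrative): each u64 covers 32
 * block entries, so a goal of 3 gives spoint = 6 and the initial mask
 * 0x5555555555555555ULL << 6 clears the match bits for entries 0-2, ensuring
 * the first search word cannot report a hit before the requested goal.
 */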

/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code
 */

static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
	u64 rblock = block - rbm->rgd->rd_data0;
	u32 goal = (u32)rblock;
	int x;

	if (WARN_ON_ONCE(rblock > UINT_MAX))
		return -EINVAL;
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
		return -E2BIG;

	for (x = 0; x < rbm->rgd->rd_length; x++) {
		rbm->bi = rbm->rgd->rd_bits + x;
		if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) {
			rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY);
			break;
		}
	}

	return 0;
}
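/*
 * Illustrative example (assumed values): with rd_data0 = 1000 and two
 * bitmaps of bi_len = 16 bytes (64 blocks) each, block 1070 gives
 * goal = 70, which is beyond the first bitmap (64 blocks) and lands in
 * the second one (bi_start = 16), so rbm->offset = 70 - 16 * GFS2_NBBY = 6.
 */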

/**
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 *
 * Returns: true if a non-free block is encountered
 */

static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
	u64 block;
	u32 n;
	u8 res;

	for (n = 0; n < n_unaligned; n++) {
		res = gfs2_testbit(rbm);
		if (res != GFS2_BLKST_FREE)
			return true;
		(*len)--;
		if (*len == 0)
			return true;
		block = gfs2_rbm_to_block(rbm);
		if (gfs2_rbm_from_block(rbm, block + 1))
			return true;
	}

	return false;
}

/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */

static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;
	u32 size = len;
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;

	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		start = rbm.bi->bi_bh->b_data;
		if (rbm.bi->bi_clone)
			start = rbm.bi->bi_clone;
		end = start + rbm.bi->bi_bh->b_size;
		start += rbm.bi->bi_offset;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		gfs2_rbm_from_block(&rbm, block + chunk_size);
		n_unaligned = 3;
		if (ptr)
			break;
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;
}
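/*
 * Illustrative walk-through (assumed values): starting byte aligned
 * (rbm.offset & 3 == 0) with len = 25, the loop above scans
 * len / GFS2_NBBY = 6 bytes with memchr_inv(); if they are all zero the
 * free extent grows by 24 blocks, len drops to 1, and the single remaining
 * block is checked bit by bit via gfs2_unaligned_extlen().
 */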

/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
{
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
	u32 count = 0;

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}
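/*
 * Example (illustrative): counting GFS2_BLKST_USED (1) sets state1 = 0x04,
 * state2 = 0x10 and state3 = 0x40; a byte of 0x05 (blocks 0 and 1 used,
 * blocks 2 and 3 free) matches the 0x03 and 0x0C tests only, adding two to
 * the running count.
 */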

/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch:  %u != %u\n",
			       count[0], rgd->rd_free);
		return;
	}

	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch:  %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[2] + count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch:  %u != %u\n",
			       count[2] + count[3], rgd->rd_dinodes);
		return;
	}
}

static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
{
	u64 first = rgd->rd_data0;
	u64 last = first + rgd->rd_data;
	return first <= block && block < last;
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			spin_unlock(&sdp->sd_rindex_spin);
			if (exact) {
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n = next;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);

	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}

void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
	int x;

	for (x = 0; x < rgd->rd_length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
	}
}

/**
 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
 * @ip: the inode for this reservation
 */
int gfs2_rs_alloc(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *res;

	if (ip->i_res)
		return 0;

	res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
	if (!res)
		return -ENOMEM;

	RB_CLEAR_NODE(&res->rs_node);

	down_write(&ip->i_rw_mutex);
	if (ip->i_res)
		kmem_cache_free(gfs2_rsrv_cachep, res);
	else
		ip->i_res = res;
	up_write(&ip->i_rw_mutex);
	return 0;
}

static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
	gfs2_print_dbg(seq, "  B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)rs->rs_inum,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}

/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	if (!gfs2_rs_active(rs))
		return;

	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);

	if (rs->rs_free) {
		/* return reserved blocks to the rgrp and the ip */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		rs->rs_free = 0;
		clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
		smp_mb__after_clear_bit();
	}
}

/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	rgd = rs->rs_rbm.rgd;
	if (rgd) {
		spin_lock(&rgd->rd_rsspin);
		__rs_deltree(ip, rs);
		spin_unlock(&rgd->rd_rsspin);
	}
}

/**
 * gfs2_rs_delete - delete a multi-block reservation
 * @ip: The inode for this reservation
 *
 */
void gfs2_rs_delete(struct gfs2_inode *ip)
{
	down_write(&ip->i_rw_mutex);
	if (ip->i_res) {
		gfs2_rs_deltree(ip, ip->i_res);
		BUG_ON(ip->i_res->rs_free);
		kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
		ip->i_res = NULL;
	}
	up_write(&ip->i_rw_mutex);
}

/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
	struct rb_node *n;
	struct gfs2_blkreserv *rs;

	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		__rs_deltree(NULL, rs);
	}
	spin_unlock(&rgd->rd_rsspin);
}

void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		gl = rgd->rd_gl;

		rb_erase(n, &sdp->sd_rindex_tree);

		if (gl) {
			spin_lock(&gl->gl_spin);
			gl->gl_object = NULL;
			spin_unlock(&gl->gl_spin);
			gfs2_glock_add_to_lru(gl);
			gfs2_glock_put(gl);
		}

		gfs2_free_clones(rgd);
		kfree(rgd->rd_bits);
		return_all_reservations(rgd);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}

static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	printk(KERN_INFO "  ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	printk(KERN_INFO "  ri_length = %u\n", rgd->rd_length);
	printk(KERN_INFO "  ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	printk(KERN_INFO "  ri_data = %u\n", rgd->rd_data);
	printk(KERN_INFO "  ri_bitbytes = %u\n", rgd->rd_bitbytes);
}

/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		bi->bi_flags = 0;
		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;	
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	int error, rgrps;

	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
			break;
		error = gfs2_internal_read(ip, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	return total_data;
}

static int rgd_insert(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*newn) {
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
						  rd_node);

		parent = *newn;
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
	sdp->sd_rgrps++;
	return 0;
}

/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_gl->gl_object = rgd;
	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error)
		return 0;

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	do {
		error = read_rindex_entry(ip);
	} while (error == 0);

	if (error < 0)
		return error;

	sdp->sd_rindex_uptodate = 1;
	return 0;
}

/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int error = 0;
	int unlock_required = 0;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
				return error;
			unlock_required = 1;
		}
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
	}

	return error;
}

static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rgrp *str = buf;
	u32 rg_flags;

	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
}

static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrp *str = buf;

	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}

static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
		return 0;
	return 1;
}

static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
	rgl->__pad = 0UL;
}

static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
	rgl->rl_unlinked = cpu_to_be32(unlinked);
}

static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
	struct gfs2_bitmap *bi;
	const u32 length = rgd->rd_length;
	const u8 *buffer = NULL;
	u32 i, goal, count = 0;

	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
		goal = 0;
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
				break;
			count++;
			goal++;
		}
	}

	return count;
}


/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_go_unlock() to release the bitmaps.
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
	}
	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	}
	else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)){
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}

int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
	u32 rl_flags;

	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
		return 0;

	if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);

	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
	return 0;
}

int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
		return 0;
	return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
}

/**
 * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @gh: The glock holder for the resource group
 *
 */

void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
	int x, length = rgd->rd_length;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (bi->bi_bh) {
			brelse(bi->bi_bh);
			bi->bi_bh = NULL;
		}
	}

}

int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			     struct buffer_head *bh,
			     const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	struct block_device *bdev = sb->s_bdev;
	const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
					   bdev_logical_block_size(sb->s_bdev);
	u64 blk;
	sector_t start = 0;
	sector_t nr_sects = 0;
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
		clone += x;
		if (bh) {
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
		} else {
			diff = ~(*clone | (*clone >> 1));
		}
		diff &= 0x55;
		if (diff == 0)
			continue;
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		blk *= sects_per_blk; /* convert to sectors */
		while(diff) {
			if (diff & 1) {
				if (nr_sects == 0)
					goto start_new_extent;
				if ((start + nr_sects) != blk) {
					if (nr_sects >= minlen) {
						rv = blkdev_issue_discard(bdev,
							start, nr_sects,
							GFP_NOFS, 0);
						if (rv)
							goto fail;
						trimmed += nr_sects;
					}
					nr_sects = 0;
start_new_extent:
					start = blk;
				}
				nr_sects += sects_per_blk;
			}
			diff >>= 2;
			blk += sects_per_blk;
		}
	}
	if (nr_sects >= minlen) {
		rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
		if (rv)
			goto fail;
		trimmed += nr_sects;
	}
	if (ptrimmed)
		*ptrimmed = trimmed;
	return 0;

fail:
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
	sdp->sd_args.ar_discard = 0;
	return -EIO;
}

/**
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 *
 * Returns: 0 on success, otherwise error code
 */

int gfs2_fitrim(struct file *filp, void __user *argp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
	struct buffer_head *bh;
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd *rgd_end;
	struct gfs2_holder gh;
	struct fstrim_range r;
	int ret = 0;
	u64 amt;
	u64 trimmed = 0;
	unsigned int x;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (argp == NULL) {
		r.start = 0;
		r.len = ULLONG_MAX;
		r.minlen = 0;
	} else if (copy_from_user(&r, argp, sizeof(r)))
		return -EFAULT;

	ret = gfs2_rindex_update(sdp);
	if (ret)
		return ret;

	rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
	rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);

	while (1) {

		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			goto out;

		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
			/* Trim each bitmap in the rgrp */
			for (x = 0; x < rgd->rd_length; x++) {
				struct gfs2_bitmap *bi = rgd->rd_bits + x;
				ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, r.minlen, &amt);
				if (ret) {
					gfs2_glock_dq_uninit(&gh);
					goto out;
				}
				trimmed += amt;
			}

			/* Mark rgrp as having been trimmed */
			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
			if (ret == 0) {
				bh = rgd->rd_bits[0].bi_bh;
				rgd->rd_flags |= GFS2_RGF_TRIMMED;
				gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
				gfs2_rgrp_out(rgd, bh->b_data);
				gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
				gfs2_trans_end(sdp);
			}
		}
		gfs2_glock_dq_uninit(&gh);

		if (rgd == rgd_end)
			break;

		rgd = gfs2_rgrpd_get_next(rgd);
	}

out:
	r.len = trimmed << 9;
	if (argp && copy_to_user(argp, &r, sizeof(r)))
		return -EFAULT;

	return ret;
}

/**
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
 *
 */
static void rs_insert(struct gfs2_inode *ip)
{
	struct rb_node **newn, *parent = NULL;
	int rc;
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);

	BUG_ON(gfs2_rs_active(rs));

	spin_lock(&rgd->rd_rsspin);
	newn = &rgd->rd_rstree.rb_node;
	while (*newn) {
		struct gfs2_blkreserv *cur =
			rb_entry(*newn, struct gfs2_blkreserv, rs_node);

		parent = *newn;
		rc = rs_cmp(fsblock, rs->rs_free, cur);
		if (rc > 0)
			newn = &((*newn)->rb_right);
		else if (rc < 0)
			newn = &((*newn)->rb_left);
		else {
			spin_unlock(&rgd->rd_rsspin);
			WARN_ON(1);
			return;
		}
	}

	rb_link_node(&rs->rs_node, parent, newn);
	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);

	/* Do our rgrp accounting for the reservation */
	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
	spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
}

/**
 * rg_mblk_search - find a group of multiple free blocks to form a reservation
 * @rgd: the resource group descriptor
 * @ip: pointer to the inode for which we're reserving blocks
 * @requested: number of blocks required for this allocation
 *
 */

static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
			   unsigned requested)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	u64 goal;
	struct gfs2_blkreserv *rs = ip->i_res;
	u32 extlen;
	u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
	int ret;

	extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
	extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
		return;

	/* Find bitmap block that contains bits for goal block */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rgd->rd_last_alloc + rgd->rd_data0;

	if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
		return;

	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, extlen, ip, true);
	if (ret == 0) {
		rs->rs_rbm = rbm;
		rs->rs_free = extlen;
		rs->rs_inum = ip->i_no_addr;
		rs_insert(ip);
	}
}

/**
 * gfs2_next_unreserved_block - Return next block that is not reserved
 * @rgd: The resource group
 * @block: The starting block
 * @length: The required length
 * @ip: Ignore any reservations for this inode
 *
 * If the block does not appear in any reservation, then return the
 * block number unchanged. If it does appear in the reservation, then
 * keep looking through the tree of reservations in order to find the
 * first block number which is not reserved.
 */

static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
				      u32 length,
				      const struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs;
	struct rb_node *n;
	int rc;

	spin_lock(&rgd->rd_rsspin);
	n = rgd->rd_rstree.rb_node;
	while (n) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		rc = rs_cmp(block, length, rs);
		if (rc < 0)
			n = n->rb_left;
		else if (rc > 0)
			n = n->rb_right;
		else
			break;
	}

	if (n) {
		while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) {
			block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
			n = n->rb_right;
			if (n == NULL)
				break;
			rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		}
	}

	spin_unlock(&rgd->rd_rsspin);
	return block;
}

/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */

static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
					     u32 minext)
{
	u64 block = gfs2_rbm_to_block(rbm);
	u32 extlen = 1;
	u64 nblock;
	int ret;

	/*
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
	 */
	if (minext) {
		extlen = gfs2_free_extlen(rbm, minext);
		nblock = block + extlen;
		if (extlen < minext)
			goto fail;
	}

	/*
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	 */
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block)
		return 0;
fail:
	ret = gfs2_rbm_from_block(rbm, nblock);
	if (ret < 0)
		return ret;
	return 1;
}

/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: The requested extent length (0 for a single block)
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
			 const struct gfs2_inode *ip, bool nowrap)
{
	struct buffer_head *bh;
	struct gfs2_bitmap *initial_bi;
	u32 initial_offset;
	u32 offset;
	u8 *buffer;
	int index;
	int n = 0;
	int iters = rbm->rgd->rd_length;
	int ret;

	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	 */
	if (rbm->offset != 0)
		iters++;

	while(1) {
		if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = rbm->bi->bi_bh;
		buffer = bh->b_data + rbm->bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
			buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
			goto bitmap_full;
		rbm->offset = offset;
		if (ip == NULL)
			return 0;

		initial_bi = rbm->bi;
		ret = gfs2_reservation_check_and_update(rbm, ip, minext);
		if (ret == 0)
			return 0;
		if (ret > 0) {
			n += (rbm->bi - initial_bi);
			goto next_iter;
		}
		if (ret == -E2BIG) {
			index = 0;
			rbm->offset = 0;
			n += (rbm->bi - initial_bi);
			goto res_covered_end_of_rgrp;
		}
		return ret;

bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &rbm->bi->bi_flags);

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		index = rbm->bi - rbm->rgd->rd_bits;
		index++;
		if (index == rbm->rgd->rd_length)
			index = 0;
res_covered_end_of_rgrp:
		rbm->bi = &rbm->rgd->rd_bits[index];
		if ((index == 0) && nowrap)
			break;
		n++;
next_iter:
		if (n >= iters)
			break;
	}

	return -ENOSPC;
}

/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Any unlinked but allocated inodes that are found are queued on the
 * gfs2_delete_workqueue so that they can be reclaimed.
 */

static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;
	struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };

	while (1) {
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, 0, NULL, true);
		up_write(&sdp->sd_log_flush_lock);
		if (error == -ENOSPC)
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}

static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
{
	struct gfs2_rgrpd *rgd = *pos;

	rgd = gfs2_rgrpd_get_next(rgd);
	if (rgd == NULL)
		rgd = gfs2_rgrpd_get_next(NULL);
	*pos = rgd;
	if (rgd != begin) /* If we didn't wrap */
		return true;
	return false;
}

/**
 * gfs2_inplace_reserve - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @requested: the number of blocks to be reserved
 *
 * Returns: errno
 */

int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *begin = NULL;
	struct gfs2_blkreserv *rs = ip->i_res;
	int error = 0, rg_locked, flags = LM_FLAG_TRY;
	u64 last_unlinked = NO_BLOCK;
	int loops = 0;

	if (sdp->sd_args.ar_rgrplvb)
		flags |= GL_SKIP;
	if (gfs2_assert_warn(sdp, requested))
		return -EINVAL;
	if (gfs2_rs_active(rs)) {
		begin = rs->rs_rbm.rgd;
		flags = 0; /* Yoda: Do or do not. There is no try */
	} else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
		rs->rs_rbm.rgd = begin = ip->i_rgd;
	} else {
		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
	}
	if (rs->rs_rbm.rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		rg_locked = 1;

		if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
			rg_locked = 0;
			error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
						   LM_ST_EXCLUSIVE, flags,
						   &rs->rs_rgd_gh);
			if (error == GLR_TRYFAILED)
				goto next_rgrp;
			if (unlikely(error))
				return error;
			if (sdp->sd_args.ar_rgrplvb) {
				error = update_rgrp_lvb(rs->rs_rbm.rgd);
				if (unlikely(error)) {
					gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
					return error;
				}
			}
		}

		/* Skip unusable resource groups */
		if (rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
			goto skip_rgrp;

		if (sdp->sd_args.ar_rgrplvb)
			gfs2_rgrp_bh_get(rs->rs_rbm.rgd);

		/* Get a reservation if we don't already have one */
		if (!gfs2_rs_active(rs))
			rg_mblk_search(rs->rs_rbm.rgd, ip, requested);

		/* Skip rgrps when we can't get a reservation on first pass */
		if (!gfs2_rs_active(rs) && (loops < 1))
			goto check_rgrp;

		/* If rgrp has enough free space, use it */
		if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
			ip->i_rgd = rs->rs_rbm.rgd;
			return 0;
		}

		/* Drop reservation, if we couldn't use reserved rgrp */
		if (gfs2_rs_active(rs))
			gfs2_rs_deltree(ip, rs);
check_rgrp:
		/* Check for unlinked inodes which can be reclaimed */
		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
			try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
					ip->i_no_addr);
skip_rgrp:
		/* Unlock rgrp if required */
		if (!rg_locked)
			gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
next_rgrp:
		/* Find the next rgrp, and continue looking */
		if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
			continue;

		/* If we've scanned all the rgrps, but found no free blocks
		 * then this checks for some less likely conditions before
		 * trying again.
		 */
		flags &= ~LM_FLAG_TRY;
		loops++;
		/* Check that fs hasn't grown if writing to rindex */
		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
			if (error)
				return error;
		}
		/* Flushing the log may release space */
		if (loops == 2)
			gfs2_log_flush(sdp, NULL);
	}

	return -ENOSPC;
}

/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_blkreserv *rs = ip->i_res;

	if (rs->rs_rgd_gh.gh_gl)
		gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}
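
/*
 * Usage sketch (illustrative only, not copied from an actual caller;
 * the variable names are hypothetical and the surrounding quota and
 * transaction setup is omitted): a write path pairs the reservation
 * with a release around the allocation itself, e.g.
 *
 *	u64 bn;
 *	unsigned int n = requested;
 *	int error;
 *
 *	error = gfs2_inplace_reserve(ip, requested);
 *	if (error)
 *		return error;
 *	error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 *	gfs2_inplace_release(ip);
 */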

/**
 * gfs2_get_block_type - Check if a block in a RG is of a given type
 * @rgd: the resource group holding the block
 * @block: the block number
 *
 * Returns: The block type (GFS2_BLKST_*)
 */

static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	int ret;

	ret = gfs2_rbm_from_block(&rbm, block);
	WARN_ON_ONCE(ret != 0);

	return gfs2_testbit(&rbm);
}


/**
 * gfs2_alloc_extent - allocate an extent from a given bitmap
 * @rbm: the resource group information
 * @dinode: TRUE if the first block we allocate is for a dinode
 * @n: The extent length (value/result)
 *
 * Add the bitmap buffer to the transaction.
 * Set the found bits to GFS2_BLKST_DINODE or GFS2_BLKST_USED as
 * required to change the blocks' allocation state.
 */
static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
			     unsigned int *n)
{
	struct gfs2_rbm pos = { .rgd = rbm->rgd, };
	const unsigned int elen = *n;
	u64 block;
	int ret;

	*n = 1;
	block = gfs2_rbm_to_block(rbm);
	gfs2_trans_add_bh(rbm->rgd->rd_gl, rbm->bi->bi_bh, 1);
	gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	block++;
	while (*n < elen) {
		ret = gfs2_rbm_from_block(&pos, block);
		if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
			break;
		gfs2_trans_add_bh(pos.rgd->rd_gl, pos.bi->bi_bh, 1);
		gfs2_setbit(&pos, true, GFS2_BLKST_USED);
		(*n)++;
		block++;
	}
}

/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns:  Resource group containing the block(s)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rbm rbm;

	rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
	if (!rbm.rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	while (blen--) {
		gfs2_rbm_from_block(&rbm, bstart);
		bstart++;
		if (!rbm.bi->bi_clone) {
			rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
			       rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
			       rbm.bi->bi_len);
		}
		gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.bi->bi_bh, 1);
		gfs2_setbit(&rbm, false, new_state);
	}

	return rbm.rgd;
}

/**
 * gfs2_rgrp_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 *
 */

int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;
	struct gfs2_blkreserv *trs;
	const struct rb_node *n;

	if (rgd == NULL)
		return 0;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
		       rgd->rd_reserved);
	spin_lock(&rgd->rd_rsspin);
	for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
		trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		dump_rs(seq, trs);
	}
	spin_unlock(&rgd->rd_rsspin);
	return 0;
}

static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}

/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.
 */

static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			if (rs->rs_free && !ret)
				goto out;
		}
		__rs_deltree(ip, rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}

/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode
 *
 * Returns: 0 or error
 */

int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	unsigned int ndata;
	u64 goal;
	u64 block; /* block, within the file system scope */
	int error;

	if (gfs2_rs_active(ip->i_res))
		goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
	else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
		goal = ip->i_goal;
	else
		goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;

	gfs2_rbm_from_block(&rbm, goal);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);

	if (error == -ENOSPC) {
		gfs2_rbm_from_block(&rbm, goal);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
		goto rgrp_error;
	}

	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		printk(KERN_WARNING "nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, 1);

	/*
	 * This needs reviewing to see why we cannot do the quota change
	 * at this point in the dinode case.
	 */
	if (ndata)
		gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
				  ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
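
/*
 * Usage sketch (illustrative only, hypothetical variable names): when
 * creating a new inode, the dinode block is allocated with dinode set
 * so that a generation number is returned as well:
 *
 *	u64 no_addr, generation;
 *	unsigned int nblocks = 1;
 *
 *	error = gfs2_alloc_blocks(ip, &no_addr, &nblocks, 1, &generation);
 *
 * On success, no_addr holds the first allocated block and generation
 * holds the new inode's generation number.
 */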

/**
 * __gfs2_free_blocks - free a contiguous run of block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 * @meta: 1 if the blocks represent metadata
 *
 */

void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
	rgd->rd_free += blen;
	rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);

	/* Directories keep their data in the metadata address space */
	if (meta || ip->i_depth)
		gfs2_meta_wipe(ip, bstart, blen);
}

/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
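
/*
 * Usage sketch (illustrative only): a deallocation path that already
 * holds the rgrp glock and has an open transaction would typically
 * release metadata runs through gfs2_free_meta(), which also adjusts
 * statfs and quota, and ordinary data blocks through
 * __gfs2_free_blocks() with meta == 0:
 *
 *	gfs2_free_meta(ip, bstart, blen);         (e.g. indirect blocks)
 *	__gfs2_free_blocks(ip, bstart, blen, 0);  (file data blocks)
 */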

void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, 1);
}

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
	update_rgrp_lvb_unlinked(rgd, -1);

	gfs2_statfs_change(sdp, 0, +1, -1);
}


void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}

/**
 * gfs2_check_blk_type - Check the type of a block
 * @sdp: The superblock
 * @no_addr: The block number to check
 * @type: The block type we are looking for
 *
 * Returns: 0 if the block type matches the expected type
 *          -ESTALE if it doesn't match
 *          or -ve errno if something went wrong while checking
 */

int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rgd_gh;
	int error = -EINVAL;

	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
	if (!rgd)
		goto fail;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail;

	if (gfs2_get_block_type(rgd, no_addr) != type)
		error = -ESTALE;

	gfs2_glock_dq_uninit(&rgd_gh);
fail:
	return error;
}
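
/*
 * Usage sketch (illustrative only, hypothetical caller context): a
 * caller that receives an inode number from another node can confirm
 * the block still looks like an in-use dinode before instantiating it:
 *
 *	error = gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_DINODE);
 *	if (error)
 *		return error;   (-ESTALE means the block changed type)
 */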

/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @ip: the inode
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
		rgd = ip->i_rgd;
	else
		rgd = gfs2_blk2rgrpd(sdp, block, 1);
	if (!rgd) {
		fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
		return;
	}
	ip->i_rgd = rgd;

	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}

/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *      and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				state, 0,
				&rlist->rl_ghs[x]);
}

/**
 * gfs2_rlist_free - free a resource group list
 * @list: the list of resource groups
 *
 */

void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
{
	unsigned int x;

	kfree(rlist->rl_rgd);

	if (rlist->rl_ghs) {
		for (x = 0; x < rlist->rl_rgrps; x++)
			gfs2_holder_uninit(&rlist->rl_ghs[x]);
		kfree(rlist->rl_ghs);
		rlist->rl_ghs = NULL;
	}
}
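
/*
 * Usage sketch (illustrative only): callers that need to lock several
 * resource groups at once, for example when the blocks being freed
 * span multiple rgrps, build the list, acquire the glocks, do the
 * work and then tear it all down.  gfs2_glock_nq_m()/gfs2_glock_dq_m()
 * live outside this file and are shown here only as an assumption
 * about the surrounding code:
 *
 *	struct gfs2_rgrp_list rlist = { .rl_rgrps = 0 };
 *
 *	gfs2_rlist_add(ip, &rlist, block);	(once per block of interest)
 *	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 *	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	...free or relocate the blocks, then...
 *	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
 *	gfs2_rlist_free(&rlist);
 */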